Use of org.apache.ignite.internal.processors.cache.CacheObjectContext in project ignite by apache.
The class GridQueryProcessor, method remove.
/**
 * @param cacheName Cache name.
 * @param key Key.
 * @param partId Partition.
 * @param val Value.
 * @param ver Version.
 * @throws IgniteCheckedException Thrown in case of any errors.
 */
public void remove(String cacheName, KeyCacheObject key, int partId, CacheObject val, GridCacheVersion ver) throws IgniteCheckedException {
    assert key != null;

    if (log.isDebugEnabled())
        log.debug("Remove [cacheName=" + cacheName + ", key=" + key + ", val=" + val + "]");

    if (idx == null)
        return;

    if (!busyLock.enterBusy())
        throw new IllegalStateException("Failed to remove from index (grid is stopping).");

    try {
        CacheObjectContext coctx = cacheObjectContext(cacheName);

        QueryTypeDescriptorImpl desc = typeByValue(cacheName, coctx, key, val, false);

        if (desc == null)
            return;

        idx.remove(cacheName, desc, key, partId, val, ver);
    }
    finally {
        busyLock.leaveBusy();
    }
}
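The enterBusy()/leaveBusy() pairing above is the standard Ignite guard against an operation racing with node shutdown. Below is a minimal sketch of the same pattern around a plain GridBusyLock; the removeGuarded() and doRemove() names are hypothetical stand-ins, not part of the snippet above.

import org.apache.ignite.internal.util.GridBusyLock;

public class BusyGuardedIndexOps {
    /** Guards index operations against a concurrent node stop. */
    private final GridBusyLock busyLock = new GridBusyLock();

    /** Removes a key from the index only if the node is not stopping. */
    public void removeGuarded(Object key) {
        if (!busyLock.enterBusy())
            throw new IllegalStateException("Failed to remove from index (grid is stopping).");

        try {
            doRemove(key); // Stand-in for idx.remove(cacheName, desc, key, partId, val, ver).
        }
        finally {
            busyLock.leaveBusy();
        }
    }

    /** Hypothetical helper; the real code delegates to the indexing module. */
    private void doRemove(Object key) {
        // No-op in this sketch.
    }
}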
Use of org.apache.ignite.internal.processors.cache.CacheObjectContext in project ignite by apache.
The class GridQueryProcessor, method store.
/**
 * Writes key-value pair to index.
 *
 * @param cacheName Cache name.
 * @param key Key.
 * @param partId Partition.
 * @param prevVal Previous value.
 * @param prevVer Previous version.
 * @param val Value.
 * @param ver Cache entry version.
 * @param expirationTime Expiration time or 0 if never expires.
 * @param link Link.
 * @throws IgniteCheckedException In case of error.
 */
@SuppressWarnings({"unchecked", "ConstantConditions"})
public void store(final String cacheName, final KeyCacheObject key, int partId, @Nullable CacheObject prevVal,
    @Nullable GridCacheVersion prevVer, final CacheObject val, GridCacheVersion ver, long expirationTime,
    long link) throws IgniteCheckedException {
    assert key != null;
    assert val != null;

    if (log.isDebugEnabled())
        log.debug("Store [cache=" + cacheName + ", key=" + key + ", val=" + val + "]");

    if (idx == null)
        return;

    if (!busyLock.enterBusy())
        throw new NodeStoppingException("Operation has been cancelled (node is stopping).");

    try {
        CacheObjectContext coctx = cacheObjectContext(cacheName);

        QueryTypeDescriptorImpl desc = typeByValue(cacheName, coctx, key, val, true);

        if (prevVal != null) {
            QueryTypeDescriptorImpl prevValDesc = typeByValue(cacheName, coctx, key, prevVal, false);

            if (prevValDesc != null && prevValDesc != desc)
                idx.remove(cacheName, prevValDesc, key, partId, prevVal, prevVer);
        }

        if (desc == null)
            return;

        idx.store(cacheName, desc, key, partId, val, ver, expirationTime, link);
    }
    finally {
        busyLock.leaveBusy();
    }
}
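The notable branch in store() is the handling of an update that changes the value's query type: the old entry is removed under the previous type descriptor before the new one is written, so stale rows don't linger in the old type's index. A small, self-contained model of that decision follows; the map-based index and the typeOf() helper are hypothetical and stand in for the real type-descriptor resolution.

import java.util.HashMap;
import java.util.Map;

/** Simplified model of the "type changed on update" handling in store(). */
public class TypeAwareIndexSketch {
    /** Hypothetical per-type index: type name -> (key -> value). */
    private final Map<String, Map<Object, Object>> idx = new HashMap<>();

    /** Stores a value, evicting the old entry when the value's type has changed. */
    public void store(Object key, Object prevVal, Object val) {
        String desc = typeOf(val);

        if (prevVal != null) {
            String prevDesc = typeOf(prevVal);

            // Mirrors: if (prevValDesc != null && prevValDesc != desc) idx.remove(...).
            if (!prevDesc.equals(desc))
                idx.getOrDefault(prevDesc, new HashMap<>()).remove(key);
        }

        idx.computeIfAbsent(desc, t -> new HashMap<>()).put(key, val);
    }

    /** Stand-in for typeByValue(): resolves the query type from the value's class. */
    private String typeOf(Object val) {
        return val.getClass().getSimpleName();
    }
}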
Use of org.apache.ignite.internal.processors.cache.CacheObjectContext in project ignite by apache.
The class GridCacheQueryManager, method store.
/**
* Writes key-value pair to index.
*
* @param key Key.
* @param partId Partition.
* @param prevVal Previous value.
* @param prevVer Previous version.
* @param val Value.
* @param ver Cache entry version.
* @param expirationTime Expiration time or 0 if never expires.
* @param link Link.
* @throws IgniteCheckedException In case of error.
*/
public void store(KeyCacheObject key, int partId, @Nullable CacheObject prevVal, @Nullable GridCacheVersion prevVer,
    CacheObject val, GridCacheVersion ver, long expirationTime, long link) throws IgniteCheckedException {
    assert key != null;
    assert val != null;
    assert enabled();

    if (key instanceof GridCacheInternal)
        // No-op.
        return;

    if (!enterBusy())
        throw new NodeStoppingException("Operation has been cancelled (node is stopping).");

    try {
        if (isIndexingSpiEnabled()) {
            CacheObjectContext coctx = cctx.cacheObjectContext();

            Object key0 = unwrapIfNeeded(key, coctx);

            Object val0 = unwrapIfNeeded(val, coctx);

            cctx.kernalContext().indexing().store(cacheName, key0, val0, expirationTime);
        }

        if (qryProcEnabled)
            qryProc.store(cacheName, key, partId, prevVal, prevVer, val, ver, expirationTime, link);
    }
    finally {
        invalidateResultCache();

        leaveBusy();
    }
}
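Here store() fans a single cache write out to up to two indexing paths: a pluggable IndexingSpi, which receives the key and value unwrapped through the CacheObjectContext, and the SQL query processor, which works on the cache objects directly. The sketch below models only that fan-out; both sinks and the unwrap() helper are hypothetical simplifications of the real SPI and query-processor calls.

/** Hypothetical sink mirroring the IndexingSpi path (works on unwrapped user objects). */
interface SpiSink {
    void store(String cacheName, Object key, Object val, long expireTime);
}

/** Hypothetical sink mirroring the query-processor path (works on cache objects). */
interface QuerySink {
    void store(String cacheName, Object key, Object val, long expireTime);
}

/** Fans a single write out to whichever indexing paths are enabled. */
public class DualIndexDispatchSketch {
    private final SpiSink spi;        // Null when no IndexingSpi is configured.
    private final QuerySink qryProc;  // Null when SQL indexing is disabled for the cache.

    public DualIndexDispatchSketch(SpiSink spi, QuerySink qryProc) {
        this.spi = spi;
        this.qryProc = qryProc;
    }

    /** Mirrors the two guarded branches of GridCacheQueryManager.store(). */
    public void store(String cacheName, Object key, Object val, long expireTime) {
        if (spi != null)
            spi.store(cacheName, unwrap(key), unwrap(val), expireTime);

        if (qryProc != null)
            qryProc.store(cacheName, key, val, expireTime);
    }

    /** Stand-in for unwrapIfNeeded(obj, coctx): deserializes a stored object to a user object. */
    private Object unwrap(Object obj) {
        return obj; // No-op in this sketch.
    }
}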
Use of org.apache.ignite.internal.processors.cache.CacheObjectContext in project ignite by apache.
The class CacheDataRowAdapter, method initFromLink.
/**
 * Read row from data pages.
 * Can be called with {@code grp == null} if the cache group is unknown, as long as the cache ID is stored in the data row.
 *
 * @param grp Cache group.
 * @param sharedCtx Shared context.
 * @param pageMem Page memory.
 * @param rowData Row data.
 * @throws IgniteCheckedException If failed.
 */
public final void initFromLink(@Nullable CacheGroupContext grp, GridCacheSharedContext<?, ?> sharedCtx,
    PageMemory pageMem, RowData rowData) throws IgniteCheckedException {
    assert link != 0 : "link";
    assert key == null : "key";

    CacheObjectContext coctx = grp != null ? grp.cacheObjectContext() : null;

    boolean readCacheId = grp == null || grp.storeCacheIdInDataPage();

    long nextLink = link;

    IncompleteObject<?> incomplete = null;

    boolean first = true;

    do {
        final long pageId = pageId(nextLink);

        // Group is null when a page is being evicted; with persistence enabled, evictions should be disabled.
        assert grp != null || pageMem instanceof PageMemoryNoStoreImpl;

        int grpId = grp != null ? grp.groupId() : 0;

        final long page = pageMem.acquirePage(grpId, pageId);

        try {
            // Non-empty data page must not be recycled.
            long pageAddr = pageMem.readLock(grpId, pageId, page);

            assert pageAddr != 0L : nextLink;

            try {
                DataPageIO io = DataPageIO.VERSIONS.forPage(pageAddr);

                DataPagePayload data = io.readPayload(pageAddr, itemId(nextLink), pageMem.pageSize());

                nextLink = data.nextLink();

                if (first) {
                    if (nextLink == 0) {
                        // Fast path for a single-page row.
                        readFullRow(sharedCtx, coctx, pageAddr + data.offset(), rowData, readCacheId);

                        return;
                    }

                    first = false;
                }

                ByteBuffer buf = pageMem.pageBuffer(pageAddr);

                buf.position(data.offset());
                buf.limit(data.offset() + data.payloadSize());

                boolean keyOnly = rowData == RowData.KEY_ONLY;

                incomplete = readFragment(sharedCtx, coctx, buf, keyOnly, readCacheId, incomplete);

                if (keyOnly && key != null)
                    return;
            }
            finally {
                pageMem.readUnlock(grpId, pageId, page);
            }
        }
        finally {
            pageMem.releasePage(grpId, pageId, page);
        }
    }
    while (nextLink != 0);

    assert isReady() : "ready";
}
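initFromLink() reassembles a row that may be fragmented across several data pages: each fragment's payload carries the link of the next fragment, traversal stops when that link is 0, and a single-page row takes the fast path. The sketch below models only that chain-following loop; the in-memory "page store", Fragment type and readRow() name are hypothetical, standing in for acquirePage/readLock and readPayload.

import java.io.ByteArrayOutputStream;
import java.util.Map;

/** Simplified model of following fragment links until the whole row payload is assembled. */
public class FragmentChainSketch {
    /** Hypothetical page store: link -> (payload bytes, link of the next fragment or 0). */
    private final Map<Long, Fragment> pages;

    public FragmentChainSketch(Map<Long, Fragment> pages) {
        this.pages = pages;
    }

    /** One stored fragment: its payload and the link to the next fragment (0 = last). */
    public static final class Fragment {
        final byte[] payload;
        final long nextLink;

        public Fragment(byte[] payload, long nextLink) {
            this.payload = payload;
            this.nextLink = nextLink;
        }
    }

    /** Mirrors the do/while in initFromLink(): read, append, follow nextLink until 0. */
    public byte[] readRow(long link) {
        ByteArrayOutputStream row = new ByteArrayOutputStream();

        long nextLink = link;

        do {
            Fragment f = pages.get(nextLink); // Stand-in for page acquire/read-lock + readPayload.

            row.write(f.payload, 0, f.payload.length);

            nextLink = f.nextLink;
        }
        while (nextLink != 0);

        return row.toByteArray();
    }
}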
Use of org.apache.ignite.internal.processors.cache.CacheObjectContext in project ignite by apache.
The class IgnitePdsCheckpointSimulationWithRealCpDisabledTest, method testDataWalEntries.
/**
* @throws Exception if failed.
*/
public void testDataWalEntries() throws Exception {
    IgniteEx ig = startGrid(0);

    ig.active(true);

    GridCacheSharedContext<Object, Object> sharedCtx = ig.context().cache().context();

    GridCacheContext<Object, Object> cctx = sharedCtx.cache().cache(cacheName).context();

    GridCacheDatabaseSharedManager db = (GridCacheDatabaseSharedManager)sharedCtx.database();

    IgniteWriteAheadLogManager wal = sharedCtx.wal();

    assertTrue(wal.isAlwaysWriteFullPages());

    db.enableCheckpoints(false).get();

    final int cnt = 10;

    List<DataEntry> entries = new ArrayList<>(cnt);

    for (int i = 0; i < cnt; i++) {
        GridCacheOperation op = i % 2 == 0 ? GridCacheOperation.UPDATE : GridCacheOperation.DELETE;

        KeyCacheObject key = cctx.toCacheKeyObject(i);

        CacheObject val = null;

        if (op != GridCacheOperation.DELETE)
            val = cctx.toCacheObject("value-" + i);

        entries.add(new DataEntry(cctx.cacheId(), key, val, op, null, cctx.versions().next(), 0L,
            cctx.affinity().partition(i), i));
    }

    UUID cpId = UUID.randomUUID();

    WALPointer start = wal.log(new CheckpointRecord(cpId, null));

    wal.fsync(start);

    for (DataEntry entry : entries)
        wal.log(new DataRecord(entry));

    // Data will not be written to the page store.
    stopAllGrids();

    ig = startGrid(0);

    ig.active(true);

    sharedCtx = ig.context().cache().context();

    cctx = sharedCtx.cache().cache(cacheName).context();

    db = (GridCacheDatabaseSharedManager)sharedCtx.database();

    wal = sharedCtx.wal();

    db.enableCheckpoints(false).get();

    try (PartitionMetaStateRecordExcludeIterator it = new PartitionMetaStateRecordExcludeIterator(wal.replay(start))) {
        IgniteBiTuple<WALPointer, WALRecord> cpRecordTup = it.next();

        assert cpRecordTup.get2() instanceof CheckpointRecord;

        assertEquals(start, cpRecordTup.get1());

        CheckpointRecord cpRec = (CheckpointRecord)cpRecordTup.get2();

        assertEquals(cpId, cpRec.checkpointId());
        assertNull(cpRec.checkpointMark());
        assertFalse(cpRec.end());

        int idx = 0;

        CacheObjectContext coctx = cctx.cacheObjectContext();

        while (idx < entries.size()) {
            IgniteBiTuple<WALPointer, WALRecord> dataRecTup = it.next();

            assert dataRecTup.get2() instanceof DataRecord;

            DataRecord dataRec = (DataRecord)dataRecTup.get2();

            DataEntry entry = entries.get(idx);

            assertEquals(1, dataRec.writeEntries().size());

            DataEntry readEntry = dataRec.writeEntries().get(0);

            assertEquals(entry.cacheId(), readEntry.cacheId());
            assertEquals(entry.key().<Integer>value(coctx, true), readEntry.key().<Integer>value(coctx, true));
            assertEquals(entry.op(), readEntry.op());

            if (entry.op() == GridCacheOperation.UPDATE)
                assertEquals(entry.value().value(coctx, true), readEntry.value().value(coctx, true));
            else
                assertNull(entry.value());

            assertEquals(entry.writeVersion(), readEntry.writeVersion());
            assertEquals(entry.nearXidVersion(), readEntry.nearXidVersion());
            assertEquals(entry.partitionCounter(), readEntry.partitionCounter());

            idx++;
        }
    }
}
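The assertions at the end are where CacheObjectContext actually matters on this page: the test wraps user values with cctx.toCacheObject(...) before logging them, and after WAL replay it calls value(coctx, true) to deserialize the stored form back into the user object so plain equals() comparisons work. The sketch below models that wrap/unwrap round trip with ordinary Java serialization; the StoredValue type and toStored()/value() names are hypothetical stand-ins for CacheObject, toCacheObject() and value(coctx, true).

import java.io.*;

/** Simplified model of wrapping a user value for storage and unwrapping it on read. */
public class CacheObjectRoundTripSketch {
    /** Stand-in for CacheObject: holds the marshalled bytes of a user value. */
    public static final class StoredValue {
        private final byte[] bytes;

        StoredValue(byte[] bytes) {
            this.bytes = bytes;
        }

        /** Mirrors CacheObject.value(coctx, true): unmarshal into the user type. */
        @SuppressWarnings("unchecked")
        public <T> T value() throws IOException, ClassNotFoundException {
            try (ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes))) {
                return (T)in.readObject();
            }
        }
    }

    /** Mirrors cctx.toCacheObject(obj): marshal a user value into its stored form. */
    public static StoredValue toStored(Object obj) throws IOException {
        ByteArrayOutputStream bos = new ByteArrayOutputStream();

        try (ObjectOutputStream out = new ObjectOutputStream(bos)) {
            out.writeObject(obj);
        }

        return new StoredValue(bos.toByteArray());
    }

    public static void main(String[] args) throws Exception {
        StoredValue val = toStored("value-1");

        String back = val.value();

        System.out.println(back); // Prints "value-1": the round trip preserves the user object.
    }
}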