Use of org.apache.ignite.lang.IgniteBiTuple in project ignite by apache.
The class GridDhtAtomicCache, method updatePartialBatch.
/**
* @param hasNear {@code True} if originating node has near cache.
* @param firstEntryIdx Index of the first entry in the request keys collection.
* @param entries Entries to update.
* @param ver Version to set.
* @param nearNode Originating node.
* @param writeVals Write values.
* @param putMap Values to put.
* @param rmvKeys Keys to remove.
* @param entryProcessorMap Entry processors.
* @param dhtFut DHT update future if there are backups.
* @param req Request.
* @param res Response.
* @param replicate Whether replication is enabled.
* @param batchRes Batch update result.
* @param taskName Task name.
* @param expiry Expiry policy.
* @param sndPrevVal If {@code true} sends previous value to backups.
* @return DHT update future, or {@code null} if there is none.
*/
@SuppressWarnings("ForLoopReplaceableByForEach")
@Nullable
private GridDhtAtomicAbstractUpdateFuture updatePartialBatch(
    final boolean hasNear,
    final int firstEntryIdx,
    final List<GridDhtCacheEntry> entries,
    final GridCacheVersion ver,
    final ClusterNode nearNode,
    @Nullable final List<CacheObject> writeVals,
    @Nullable final Map<KeyCacheObject, CacheObject> putMap,
    @Nullable final Collection<KeyCacheObject> rmvKeys,
    @Nullable final Map<KeyCacheObject, EntryProcessor<Object, Object, Object>> entryProcessorMap,
    @Nullable GridDhtAtomicAbstractUpdateFuture dhtFut,
    final GridNearAtomicAbstractUpdateRequest req,
    final GridNearAtomicUpdateResponse res,
    final boolean replicate,
    final DhtAtomicUpdateResult batchRes,
    final String taskName,
    @Nullable final IgniteCacheExpiryPolicy expiry,
    final boolean sndPrevVal) {
assert putMap == null ^ rmvKeys == null;
assert req.conflictVersions() == null : "Cannot be called when there are conflict entries in the batch.";
AffinityTopologyVersion topVer = req.topologyVersion();
CacheStorePartialUpdateException storeErr = null;
try {
GridCacheOperation op;
if (putMap != null) {
try {
Map<? extends KeyCacheObject, IgniteBiTuple<? extends CacheObject, GridCacheVersion>> view = F.viewReadOnly(putMap,
    new C1<CacheObject, IgniteBiTuple<? extends CacheObject, GridCacheVersion>>() {
        @Override public IgniteBiTuple<? extends CacheObject, GridCacheVersion> apply(CacheObject val) {
            return F.t(val, ver);
        }
    });
ctx.store().putAll(null, view);
} catch (CacheStorePartialUpdateException e) {
storeErr = e;
}
op = UPDATE;
} else {
try {
ctx.store().removeAll(null, rmvKeys);
} catch (CacheStorePartialUpdateException e) {
storeErr = e;
}
op = DELETE;
}
boolean intercept = ctx.config().getInterceptor() != null;
AffinityAssignment affAssignment = ctx.affinity().assignment(topVer);
// Avoid iterator creation.
for (int i = 0; i < entries.size(); i++) {
GridDhtCacheEntry entry = entries.get(i);
assert entry.lockedByCurrentThread();
if (entry.obsolete()) {
assert req.operation() == DELETE : "Entry can become obsolete only after remove: " + entry;
continue;
}
if (storeErr != null && storeErr.failedKeys().contains(entry.key().value(ctx.cacheObjectContext(), false)))
continue;
try {
// We are holding java-level locks on entries at this point.
CacheObject writeVal = op == UPDATE ? writeVals.get(i) : null;
assert writeVal != null || op == DELETE : "null write value found.";
// Get readers before innerUpdate (reader cleared after remove).
GridDhtCacheEntry.ReaderId[] readers = entry.readersLocked();
GridCacheUpdateAtomicResult updRes = entry.innerUpdate(
    ver,
    nearNode.id(),
    locNodeId,
    op,
    writeVal,
    null,
    /*write-through*/false,
    /*read-through*/false,
    /*retval*/sndPrevVal,
    req.keepBinary(),
    expiry,
    /*event*/true,
    /*metrics*/true,
    /*primary*/true,
    /*verCheck*/false,
    topVer,
    null,
    replicate ? DR_PRIMARY : DR_NONE,
    CU.TTL_NOT_CHANGED,
    CU.EXPIRE_TIME_CALCULATE,
    null,
    /*conflict resolve*/false,
    /*intercept*/false,
    req.subjectId(),
    taskName,
    null,
    null,
    dhtFut);
assert !updRes.success() || updRes.newTtl() == CU.TTL_NOT_CHANGED || expiry != null : "success=" + updRes.success() + ", newTtl=" + updRes.newTtl() + ", expiry=" + expiry;
if (intercept) {
if (op == UPDATE) {
ctx.config().getInterceptor().onAfterPut(new CacheLazyEntry(ctx, entry.key(), updRes.newValue(), req.keepBinary()));
} else {
assert op == DELETE : op;
// Old value should be already loaded for 'CacheInterceptor.onBeforeRemove'.
ctx.config().getInterceptor().onAfterRemove(new CacheLazyEntry(ctx, entry.key(), updRes.oldValue(), req.keepBinary()));
}
}
batchRes.addDeleted(entry, updRes, entries);
if (dhtFut != null) {
EntryProcessor<Object, Object, Object> entryProcessor = entryProcessorMap == null ? null : entryProcessorMap.get(entry.key());
dhtFut.addWriteEntry(affAssignment, entry, writeVal, entryProcessor, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE, null, sndPrevVal, updRes.oldValue(), updRes.updateCounter());
if (readers != null)
dhtFut.addNearWriteEntries(nearNode, readers, entry, writeVal, entryProcessor, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
}
if (hasNear) {
if (!ctx.affinity().partitionBelongs(nearNode, entry.partition(), topVer)) {
int idx = firstEntryIdx + i;
if (req.operation() == TRANSFORM) {
res.addNearValue(idx, writeVal, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
} else
res.addNearTtl(idx, updRes.newTtl(), CU.EXPIRE_TIME_CALCULATE);
if (writeVal != null || entry.hasValue()) {
IgniteInternalFuture<Boolean> f = entry.addReader(nearNode.id(), req.messageId(), topVer);
assert f == null : f;
}
} else if (GridDhtCacheEntry.ReaderId.contains(readers, nearNode.id())) {
// Reader became primary or backup.
entry.removeReader(nearNode.id(), req.messageId());
} else
res.addSkippedIndex(firstEntryIdx + i);
}
} catch (GridCacheEntryRemovedException e) {
assert false : "Entry cannot become obsolete while holding lock.";
e.printStackTrace();
}
}
} catch (IgniteCheckedException e) {
res.addFailedKeys(putMap != null ? putMap.keySet() : rmvKeys, e);
}
if (storeErr != null) {
ArrayList<KeyCacheObject> failed = new ArrayList<>(storeErr.failedKeys().size());
for (Object failedKey : storeErr.failedKeys()) failed.add(ctx.toCacheKeyObject(failedKey));
res.addFailedKeys(failed, storeErr.getCause());
}
return dhtFut;
}
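The tuple here packs each value together with the single batch version so that the store write-through receives versioned values through a read-only view. Below is a minimal, self-contained sketch of the same pairing, with a plain long standing in for the internal GridCacheVersion and java.util.stream standing in for the internal F.viewReadOnly helper (both stand-ins are assumptions for illustration; note also that F.viewReadOnly is a lazy view, while this sketch materializes the map):

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.stream.Collectors;

import org.apache.ignite.lang.IgniteBiTuple;

public class VersionedPutView {
    public static void main(String[] args) {
        // Stand-in for GridCacheVersion: one version for the whole batch.
        long ver = 42L;

        Map<String, String> putMap = new LinkedHashMap<>();
        putMap.put("k1", "v1");
        putMap.put("k2", "v2");

        // Pair every value with the batch version, as the F.viewReadOnly(...)
        // closure does before ctx.store().putAll(...).
        Map<String, IgniteBiTuple<String, Long>> view = putMap.entrySet().stream()
            .collect(Collectors.toMap(
                Map.Entry::getKey,
                e -> new IgniteBiTuple<>(e.getValue(), ver),
                (a, b) -> a,
                LinkedHashMap::new));

        view.forEach((k, t) -> System.out.println(k + " -> val=" + t.get1() + ", ver=" + t.get2()));
    }
}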
Use of org.apache.ignite.lang.IgniteBiTuple in project ignite by apache.
The class AbstractWalRecordsIterator, method advanceRecord.
/**
* Switches to the next record.
* @param hnd Currently opened read handle.
* @return Next record paired with its WAL pointer, or {@code null} if no record could be read.
*/
private IgniteBiTuple<WALPointer, WALRecord> advanceRecord(@Nullable final AbstractReadFileHandle hnd) throws IgniteCheckedException {
if (hnd == null)
return null;
FileWALPointer actualFilePtr = new FileWALPointer(hnd.idx(), (int) hnd.in().position(), 0);
try {
WALRecord rec = hnd.ser().readRecord(hnd.in(), actualFilePtr);
actualFilePtr.length(rec.size());
// Explicit cast to WALPointer: relying on the diamond operator alone here can break compilation on JDK 7.
return new IgniteBiTuple<>((WALPointer) actualFilePtr, postProcessRecord(rec));
} catch (IOException | IgniteCheckedException e) {
if (e instanceof WalSegmentTailReachedException)
throw (WalSegmentTailReachedException) e;
if (!(e instanceof SegmentEofException))
handleRecordException(e, actualFilePtr);
return null;
}
}
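The pattern worth noting is capture-then-pair: the pointer is taken before the read, and the tuple binds it to the record that was read there. A minimal sketch of the same shape over a ByteBuffer of length-prefixed records (the buffer layout is an invented stand-in for the WAL segment format):

import java.nio.ByteBuffer;

import org.apache.ignite.lang.IgniteBiTuple;

public class PositionedReader {
    /**
     * Reads the next length-prefixed record and returns it paired with the
     * position it started at, or {@code null} at the end of data. Mirrors
     * advanceRecord(): capture the pointer first, then read, then pair.
     */
    static IgniteBiTuple<Integer, byte[]> next(ByteBuffer buf) {
        // Position before the record, like actualFilePtr in advanceRecord().
        int ptr = buf.position();

        if (buf.remaining() < Integer.BYTES)
            return null; // Logical end of the segment.

        int len = buf.getInt();

        if (buf.remaining() < len)
            return null; // Truncated tail, treated as end of data.

        byte[] rec = new byte[len];
        buf.get(rec);

        return new IgniteBiTuple<>(ptr, rec);
    }

    public static void main(String[] args) {
        ByteBuffer buf = ByteBuffer.allocate(64);
        buf.putInt(3).put(new byte[] {1, 2, 3});
        buf.putInt(2).put(new byte[] {4, 5});
        buf.flip();

        for (IgniteBiTuple<Integer, byte[]> t; (t = next(buf)) != null; )
            System.out.println("pos=" + t.get1() + ", len=" + t.get2().length);
    }
}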
Use of org.apache.ignite.lang.IgniteBiTuple in project ignite by apache.
The class FsyncModeFileWriteAheadLogManager, method readSerializerVersionAndCompactedFlag.
/**
* Reads the record serializer version from the provided {@code io}, along with the compacted flag.
* NOTE: The method mutates the position of {@code io}.
*
* @param io I/O interface for the file.
* @return Serializer version and compacted flag stored in the file.
* @throws IgniteCheckedException If failed to read serializer version.
*/
public static IgniteBiTuple<Integer, Boolean> readSerializerVersionAndCompactedFlag(FileIO io) throws IgniteCheckedException, IOException {
try (ByteBufferExpander buf = new ByteBufferExpander(RecordV1Serializer.HEADER_RECORD_SIZE, ByteOrder.nativeOrder())) {
FileInput in = new FileInput(io, buf);
in.ensure(RecordV1Serializer.HEADER_RECORD_SIZE);
int recordType = in.readUnsignedByte();
if (recordType == WALRecord.RecordType.STOP_ITERATION_RECORD_TYPE)
throw new SegmentEofException("Reached logical end of the segment", null);
WALRecord.RecordType type = WALRecord.RecordType.fromOrdinal(recordType - 1);
if (type != WALRecord.RecordType.HEADER_RECORD)
throw new IOException("Can't read serializer version", null);
// Read file pointer.
FileWALPointer ptr = RecordV1Serializer.readPosition(in);
assert ptr.fileOffset() == 0 : "Header record should be placed at the beginning of file " + ptr;
long hdrMagicNum = in.readLong();
boolean compacted;
if (hdrMagicNum == HeaderRecord.REGULAR_MAGIC)
compacted = false;
else if (hdrMagicNum == HeaderRecord.COMPACTED_MAGIC)
compacted = true;
else {
throw new IOException("Magic is corrupted [exp=" + U.hexLong(HeaderRecord.REGULAR_MAGIC) + ", actual=" + U.hexLong(hdrMagicNum) + ']');
}
// Read serializer version.
int ver = in.readInt();
// Read and skip CRC.
in.readInt();
return new IgniteBiTuple<>(ver, compacted);
}
}
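IgniteBiTuple serves here as a lightweight two-value return type: one header read yields both the serializer version and the compacted flag. A self-contained sketch of the same shape (the magic values and header layout below are invented for the example, not the real WAL header format):

import java.io.IOException;
import java.nio.ByteBuffer;

import org.apache.ignite.lang.IgniteBiTuple;

public class HeaderSketch {
    // Invented magic values; the real ones live in HeaderRecord.
    private static final long REGULAR_MAGIC = 0x1111_2222_3333_4444L;
    private static final long COMPACTED_MAGIC = 0x5555_6666_7777_8888L;

    /** Returns the serializer version together with the compacted flag. */
    static IgniteBiTuple<Integer, Boolean> readVersionAndCompactedFlag(ByteBuffer buf) throws IOException {
        long magic = buf.getLong();

        boolean compacted;

        if (magic == REGULAR_MAGIC)
            compacted = false;
        else if (magic == COMPACTED_MAGIC)
            compacted = true;
        else
            throw new IOException("Magic is corrupted [actual=" + Long.toHexString(magic) + ']');

        int ver = buf.getInt();

        return new IgniteBiTuple<>(ver, compacted);
    }

    public static void main(String[] args) throws IOException {
        ByteBuffer buf = ByteBuffer.allocate(12).putLong(COMPACTED_MAGIC).putInt(2);
        buf.flip();

        IgniteBiTuple<Integer, Boolean> t = readVersionAndCompactedFlag(buf);

        System.out.println("ver=" + t.get1() + ", compacted=" + t.get2());
    }
}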
Use of org.apache.ignite.lang.IgniteBiTuple in project ignite by apache.
The class GridCacheQueryManager, method runQuery.
/**
* Processes cache query request.
*
* @param qryInfo Query info.
*/
@SuppressWarnings("unchecked")
protected void runQuery(GridCacheQueryInfo qryInfo) {
assert qryInfo != null;
assert qryInfo.query().type() != SCAN || !qryInfo.local() : qryInfo;
if (!enterBusy()) {
if (cctx.localNodeId().equals(qryInfo.senderId()))
throw new IllegalStateException("Failed to process query request (grid is stopping).");
// Ignore remote requests when the node is stopping.
return;
}
try {
boolean loc = qryInfo.local();
QueryResult<K, V> res = null;
if (log.isDebugEnabled())
log.debug("Running query: " + qryInfo);
boolean rmvIter = true;
GridCacheQueryAdapter<?> qry = qryInfo.query();
try {
// Preparing query closures.
IgniteClosure<Cache.Entry<K, V>, Object> trans = (IgniteClosure<Cache.Entry<K, V>, Object>) qryInfo.transformer();
IgniteReducer<Cache.Entry<K, V>, Object> rdc = (IgniteReducer<Cache.Entry<K, V>, Object>) qryInfo.reducer();
injectResources(trans);
injectResources(rdc);
int pageSize = qry.pageSize();
boolean incBackups = qry.includeBackups();
String taskName = cctx.kernalContext().task().resolveTaskName(qry.taskHash());
IgniteSpiCloseableIterator iter;
GridCacheQueryType type;
res = loc ? executeQuery(qry, qryInfo.arguments(), trans, loc, qry.subjectId(), taskName, recipient(qryInfo.senderId(), qryInfo.requestId())) : queryResult(qryInfo, taskName);
if (res == null)
return;
iter = res.iterator(recipient(qryInfo.senderId(), qryInfo.requestId()));
type = res.type();
final GridCacheAdapter<K, V> cache = cctx.cache();
if (log.isDebugEnabled())
log.debug("Received index iterator [iterHasNext=" + iter.hasNext() + ", cacheSize=" + cache.size() + ']');
int cnt = 0;
boolean stop = false;
boolean pageSent = false;
Collection<Object> data = new ArrayList<>(pageSize);
AffinityTopologyVersion topVer = cctx.affinity().affinityTopologyVersion();
final boolean statsEnabled = cctx.statisticsEnabled();
final boolean readEvt = cctx.events().isRecordable(EVT_CACHE_QUERY_OBJECT_READ);
CacheObjectContext objCtx = cctx.cacheObjectContext();
while (!Thread.currentThread().isInterrupted()) {
long start = statsEnabled ? System.nanoTime() : 0L;
// Actual row extraction may happen inside this call.
if (!iter.hasNext())
break;
Object row0 = iter.next();
// Query is cancelled.
if (row0 == null) {
onPageReady(loc, qryInfo, null, true, null);
break;
}
if (type == SCAN)
// Scan iterator may return an already transformed entry.
data.add(row0);
else {
IgniteBiTuple<K, V> row = (IgniteBiTuple<K, V>) row0;
final K key = row.getKey();
V val = row.getValue();
if (log.isDebugEnabled()) {
ClusterNode primaryNode = cctx.affinity().primaryByKey(key, cctx.affinity().affinityTopologyVersion());
log.debug(S.toString("Record", "key", key, true, "val", val, true, "incBackups", incBackups, false, "priNode", primaryNode != null ? U.id8(primaryNode.id()) : null, false, "node", U.id8(cctx.localNode().id()), false));
}
if (val == null) {
if (log.isDebugEnabled())
log.debug(S.toString("Unsuitable record value", "val", val, true));
continue;
}
if (statsEnabled) {
CacheMetricsImpl metrics = cctx.cache().metrics0();
metrics.onRead(true);
metrics.addGetTimeNanos(System.nanoTime() - start);
}
K key0 = null;
V val0 = null;
if (readEvt && cctx.gridEvents().hasListener(EVT_CACHE_QUERY_OBJECT_READ)) {
key0 = (K) CacheObjectUtils.unwrapBinaryIfNeeded(objCtx, key, qry.keepBinary(), false);
val0 = (V) CacheObjectUtils.unwrapBinaryIfNeeded(objCtx, val, qry.keepBinary(), false);
switch(type) {
case SQL:
cctx.gridEvents().record(new CacheQueryReadEvent<>(cctx.localNode(), "SQL query entry read.", EVT_CACHE_QUERY_OBJECT_READ, CacheQueryType.SQL.name(), cctx.name(), qry.queryClassName(), qry.clause(), null, null, qryInfo.arguments(), qry.subjectId(), taskName, key0, val0, null, null));
break;
case TEXT:
cctx.gridEvents().record(new CacheQueryReadEvent<>(cctx.localNode(), "Full text query entry read.", EVT_CACHE_QUERY_OBJECT_READ, CacheQueryType.FULL_TEXT.name(), cctx.name(), qry.queryClassName(), qry.clause(), null, null, null, qry.subjectId(), taskName, key0, val0, null, null));
break;
}
}
if (rdc != null) {
if (key0 == null)
key0 = (K) CacheObjectUtils.unwrapBinaryIfNeeded(objCtx, key, qry.keepBinary(), false);
if (val0 == null)
val0 = (V) CacheObjectUtils.unwrapBinaryIfNeeded(objCtx, val, qry.keepBinary(), false);
Cache.Entry<K, V> entry = new CacheEntryImpl(key0, val0);
// Reduce.
if (!rdc.collect(entry) || !iter.hasNext()) {
onPageReady(loc, qryInfo, Collections.singletonList(rdc.reduce()), true, null);
pageSent = true;
break;
} else
continue;
} else
data.add(!loc ? new GridCacheQueryResponseEntry<>(key, val) : F.t(key, val));
}
if (!loc) {
if (++cnt == pageSize || !iter.hasNext()) {
boolean finished = !iter.hasNext();
onPageReady(loc, qryInfo, data, finished, null);
pageSent = true;
if (!finished)
rmvIter = false;
if (!qryInfo.allPages())
return;
data = new ArrayList<>(pageSize);
if (stop)
break; // Breaks the outer while loop.
}
}
}
if (!pageSent) {
if (rdc == null)
onPageReady(loc, qryInfo, data, true, null);
else
onPageReady(loc, qryInfo, Collections.singletonList(rdc.reduce()), true, null);
}
} catch (Throwable e) {
if (X.hasCause(e, ClassNotFoundException.class) && !qry.keepBinary() && cctx.binaryMarshaller() && !cctx.localNode().isClient() && !log.isQuiet()) {
LT.warn(log, "Suggestion for the cause of ClassNotFoundException");
LT.warn(log, "To disable, set -D" + IGNITE_QUIET + "=true");
LT.warn(log, " ^-- Ignite configured to use BinaryMarshaller but keepBinary is false for " + "request");
LT.warn(log, " ^-- Server node need to load definition of data classes. " + "It can be reason of ClassNotFoundException(consider IgniteCache.withKeepBinary to fix)");
LT.warn(log, "Refer this page for detailed information: " + "https://apacheignite.readme.io/docs/binary-marshaller");
}
if (!X.hasCause(e, GridDhtUnreservedPartitionException.class))
U.error(log, "Failed to run query [qry=" + qryInfo + ", node=" + cctx.nodeId() + "]", e);
onPageReady(loc, qryInfo, null, true, e);
if (e instanceof Error)
throw (Error) e;
} finally {
if (loc) {
// Local iterators are always removed.
if (res != null) {
try {
res.closeIfNotShared(recipient(qryInfo.senderId(), qryInfo.requestId()));
} catch (IgniteCheckedException e) {
if (!X.hasCause(e, GridDhtUnreservedPartitionException.class))
U.error(log, "Failed to close local iterator [qry=" + qryInfo + ", node=" + cctx.nodeId() + "]", e);
}
}
} else if (rmvIter)
removeQueryResult(qryInfo.senderId(), qryInfo.requestId());
}
} finally {
leaveBusy();
}
}
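The cast (IgniteBiTuple<K, V>) row0 followed by getKey()/getValue() works because IgniteBiTuple implements Map.Entry, so a query row can be consumed either through the entry API or through get1()/get2(). A short demonstration:

import java.util.Map;

import org.apache.ignite.lang.IgniteBiTuple;

public class BiTupleAsEntry {
    public static void main(String[] args) {
        IgniteBiTuple<String, Integer> row = new IgniteBiTuple<>("key-1", 100);

        // IgniteBiTuple implements Map.Entry, hence getKey()/getValue() in runQuery().
        Map.Entry<String, Integer> entry = row;

        System.out.println(entry.getKey() + " -> " + entry.getValue());
        System.out.println(row.get1() + " -> " + row.get2());
    }
}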
Use of org.apache.ignite.lang.IgniteBiTuple in project ignite by apache.
The class GridCacheDatabaseSharedManager, method finalizeCheckpointOnRecovery.
/**
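* @param cpTs Checkpoint timestamp.
* @param cpId Checkpoint ID.
* @param walPtr WAL pointer to the checkpoint mark.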
* @throws IgniteCheckedException If failed.
*/
private void finalizeCheckpointOnRecovery(long cpTs, UUID cpId, WALPointer walPtr) throws IgniteCheckedException {
assert cpTs != 0;
ByteBuffer tmpWriteBuf = ByteBuffer.allocateDirect(pageSize());
long start = U.currentTimeMillis();
Collection<DataRegion> memPolicies = context().database().dataRegions();
List<IgniteBiTuple<PageMemory, Collection<FullPageId>>> cpEntities = new ArrayList<>(memPolicies.size());
for (DataRegion memPlc : memPolicies) {
if (memPlc.config().isPersistenceEnabled()) {
PageMemoryEx pageMem = (PageMemoryEx) memPlc.pageMemory();
cpEntities.add(new IgniteBiTuple<PageMemory, Collection<FullPageId>>(pageMem, pageMem.beginCheckpoint()));
}
}
tmpWriteBuf.order(ByteOrder.nativeOrder());
// Identity stores set.
Collection<PageStore> updStores = new HashSet<>();
int cpPagesCnt = 0;
for (IgniteBiTuple<PageMemory, Collection<FullPageId>> e : cpEntities) {
PageMemoryEx pageMem = (PageMemoryEx) e.get1();
Collection<FullPageId> cpPages = e.get2();
cpPagesCnt += cpPages.size();
for (FullPageId fullId : cpPages) {
tmpWriteBuf.rewind();
Integer tag = pageMem.getForCheckpoint(fullId, tmpWriteBuf, null);
if (tag != null) {
tmpWriteBuf.rewind();
PageStore store = storeMgr.writeInternal(fullId.groupId(), fullId.pageId(), tmpWriteBuf, tag, true);
tmpWriteBuf.rewind();
updStores.add(store);
}
}
}
long written = U.currentTimeMillis();
for (PageStore updStore : updStores) updStore.sync();
long fsync = U.currentTimeMillis();
for (IgniteBiTuple<PageMemory, Collection<FullPageId>> e : cpEntities) ((PageMemoryEx) e.get1()).finishCheckpoint();
writeCheckpointEntry(tmpWriteBuf, cpTs, cpId, walPtr, null, CheckpointEntryType.END);
cctx.pageStore().finishRecover();
if (log.isInfoEnabled())
log.info(String.format("Checkpoint finished [cpId=%s, pages=%d, markPos=%s, " + "pagesWrite=%dms, fsync=%dms, total=%dms]", cpId, cpPagesCnt, walPtr, written - start, fsync - written, fsync - start));
}
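Each tuple keeps a page memory together with the collection of page IDs it reported from beginCheckpoint(), so the write loop and the final finishCheckpoint() loop can unpack matching pairs with get1()/get2(). A minimal sketch of that collect-then-iterate shape, with a region name standing in for PageMemoryEx and plain longs for FullPageId (both stand-ins are assumptions for illustration):

import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.Collections;
import java.util.List;

import org.apache.ignite.lang.IgniteBiTuple;

public class CheckpointPlanSketch {
    public static void main(String[] args) {
        List<IgniteBiTuple<String, Collection<Long>>> cpEntities = new ArrayList<>();

        // One tuple per persistent region: the region plus its checkpoint pages.
        cpEntities.add(new IgniteBiTuple<>("default", Arrays.asList(1L, 2L, 3L)));
        cpEntities.add(new IgniteBiTuple<>("metastore", Collections.singletonList(10L)));

        int cpPagesCnt = 0;

        // Same shape as finalizeCheckpointOnRecovery(): iterate the tuples and
        // unpack each pair with get1()/get2() to aggregate across regions.
        for (IgniteBiTuple<String, Collection<Long>> e : cpEntities) {
            cpPagesCnt += e.get2().size();

            System.out.println("region=" + e.get1() + ", pages=" + e.get2());
        }

        System.out.println("Total checkpoint pages: " + cpPagesCnt);
    }
}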