Usage of org.apache.ignite.internal.util.typedef.T2 in the Apache Ignite project: class GridCacheMessage, method marshalTx.
/**
 * Marshals transaction entries and, when deployment info is required or peer class
 * loading is enabled, prepares keys, values and entry processors for deployment.
 *
 * @param txEntries Entries to marshal ({@code null} is treated as "nothing to do").
 * @param ctx Shared cache context (must not be {@code null}).
 * @throws IgniteCheckedException If marshalling or deployment preparation failed.
 */
protected final void marshalTx(Iterable<IgniteTxEntry> txEntries, GridCacheSharedContext ctx) throws IgniteCheckedException {
    assert ctx != null;

    if (txEntries == null)
        return;

    boolean transferExpiry = transferExpiryPolicy();
    boolean p2pEnabled = ctx.deploymentEnabled();

    for (IgniteTxEntry e : txEntries) {
        e.marshal(ctx, transferExpiry);

        GridCacheContext cctx = e.context();

        if (addDepInfo) {
            // Deployment info explicitly requested: prepare key, value and processors.
            if (e.key() != null)
                prepareObject(e.key().value(cctx.cacheObjectContext(), false), ctx);

            if (e.value() != null)
                prepareObject(e.value().value(cctx.cacheObjectContext(), false), ctx);

            if (e.entryProcessors() != null) {
                for (T2<EntryProcessor<Object, Object, Object>, Object[]> entProc : e.entryProcessors())
                    prepareObject(entProc.get1(), ctx);
            }
        }
        else if (p2pEnabled && e.entryProcessors() != null) {
            // Entry processors with P2P enabled force deployment info even though it was
            // not requested. (Unconditional assignment replaces the redundant
            // 'if (!forceAddDepInfo)' guard — same effect, simpler.)
            forceAddDepInfo = true;

            for (T2<EntryProcessor<Object, Object, Object>, Object[]> entProc : e.entryProcessors())
                prepareObject(entProc.get1(), ctx);
        }
    }
}
Usage of org.apache.ignite.internal.util.typedef.T2 in the Apache Ignite project: class GridLocalAtomicCache, method updateAllInternal.
/**
 * Entry point for all public update methods (put, remove, invoke).
 *
 * @param op Operation ({@code UPDATE}, {@code DELETE} or {@code TRANSFORM}).
 * @param keys Keys to update.
 * @param vals Values, iterated in lock-step with {@code keys} ({@code null} for DELETE).
 * @param invokeArgs Optional arguments for EntryProcessor.
 * @param expiryPlc Expiry policy.
 * @param retval Return value required flag.
 * @param rawRetval Return {@code GridCacheReturn} instance.
 * @param filter Cache entry filter.
 * @param writeThrough Write through.
 * @param readThrough Read through.
 * @param keepBinary Keep binary flag.
 * @return Update result: map of entry-processor results for TRANSFORM, a
 *      {@code GridCacheReturn} when {@code rawRetval} is set, otherwise the
 *      previous value or a success flag depending on {@code retval}.
 * @throws IgniteCheckedException If failed.
 */
@SuppressWarnings("unchecked")
private Object updateAllInternal(GridCacheOperation op, Collection<? extends K> keys, @Nullable Iterable<?> vals, @Nullable Object[] invokeArgs, @Nullable ExpiryPolicy expiryPlc, boolean retval, boolean rawRetval, CacheEntryPredicate filter, boolean writeThrough, boolean readThrough, boolean keepBinary) throws IgniteCheckedException {
if (keyCheck)
validateCacheKeys(keys);
// Security check depends on the operation kind.
if (op == DELETE)
ctx.checkSecurity(SecurityPermission.CACHE_REMOVE);
else
ctx.checkSecurity(SecurityPermission.CACHE_PUT);
String taskName = ctx.kernalContext().job().currentTaskName();
// A single version is used for the whole batch of updates.
GridCacheVersion ver = ctx.versions().next();
UUID subjId = ctx.subjectIdPerCall(null);
CacheEntryPredicate[] filters = CU.filterArray(filter);
IgniteBiTuple<Boolean, ?> res = null;
// Per-key failures are accumulated here instead of aborting the whole batch.
CachePartialUpdateCheckedException err = null;
// Block checkpoints while entries are being modified.
ctx.shared().database().checkpointReadLock();
try {
ctx.shared().database().ensureFreeSpace(ctx.dataRegion());
// Multi-key write-through updates go through a dedicated batched store path.
if (writeThrough && keys.size() > 1) {
return updateWithBatch(op, keys, vals, invokeArgs, expiryPlc, ver, filters, keepBinary, subjId, taskName);
}
// Values (if any) are consumed in lock-step with the key iteration below.
Iterator<?> valsIter = vals != null ? vals.iterator() : null;
boolean intercept = ctx.config().getInterceptor() != null;
for (K key : keys) {
if (key == null)
throw new NullPointerException("Null key.");
Object val = valsIter != null ? valsIter.next() : null;
if (val == null && op != DELETE)
throw new NullPointerException("Null value.");
KeyCacheObject cacheKey = ctx.toCacheKeyObject(key);
if (op == UPDATE) {
val = ctx.toCacheObject(val);
ctx.validateKeyAndValue(cacheKey, (CacheObject) val);
} else if (op == TRANSFORM)
// Inject resources (e.g. Ignite instance) into the entry processor.
ctx.kernalContext().resource().inject(val, GridResourceIoc.AnnotationSet.ENTRY_PROCESSOR, ctx.name());
// Retry loop: a concurrently removed entry is re-acquired until the update sticks.
while (true) {
GridCacheEntryEx entry = null;
try {
entry = entryEx(cacheKey);
GridTuple3<Boolean, Object, EntryProcessorResult<Object>> t = entry.innerUpdateLocal(ver, val == null ? DELETE : op, val, invokeArgs, writeThrough, readThrough, retval, keepBinary, expiryPlc, true, true, filters, intercept, subjId, taskName);
if (op == TRANSFORM) {
// Collect per-key entry-processor results into a lazily-created map.
if (t.get3() != null) {
Map<K, EntryProcessorResult> computedMap;
if (res == null) {
computedMap = U.newHashMap(keys.size());
res = new IgniteBiTuple<>(true, computedMap);
} else
computedMap = (Map<K, EntryProcessorResult>) res.get2();
computedMap.put(key, t.get3());
}
} else if (res == null)
// For non-TRANSFORM ops only the first key's (success, value) pair is kept.
res = new T2(t.get1(), t.get2());
// While.
break;
} catch (GridCacheEntryRemovedException ignored) {
// Entry was removed concurrently — loop around and re-acquire it.
if (log.isDebugEnabled())
log.debug("Got removed entry while updating (will retry): " + key);
entry = null;
} catch (IgniteCheckedException e) {
// Record the failure for this key and move on to the next one.
if (err == null)
err = partialUpdateException();
err.add(F.asList(key), e);
U.error(log, "Failed to update key : " + key, e);
break;
} finally {
// Notify eviction policy for every touched entry.
if (entry != null)
ctx.evicts().touch(entry, ctx.affinity().affinityTopologyVersion());
}
}
}
} finally {
ctx.shared().database().checkpointReadUnlock();
}
// Surface accumulated per-key failures, if any.
if (err != null)
throw err;
Object ret = res == null ? null : rawRetval ? new GridCacheReturn(ctx, true, keepBinary, res.get2(), res.get1()) : (retval || op == TRANSFORM) ? res.get2() : res.get1();
// TRANSFORM callers always get a map, even when no processor produced a result.
if (op == TRANSFORM && ret == null)
ret = Collections.emptyMap();
return ret;
}
Usage of org.apache.ignite.internal.util.typedef.T2 in the Apache Ignite project: class GridCacheDatabaseSharedManager, method applyLastUpdates.
/**
 * Replays WAL records written after the last checkpoint to restore lost updates.
 *
 * @param status Last registered checkpoint status (replay starts at {@code status.startPtr}).
 * @param metastoreOnly If {@code true}, only metastore records are applied; cache data,
 *      partition-state and page-delta records are skipped.
 * @throws IgniteCheckedException If failed to apply updates.
 * @throws StorageException If IO exception occurred while reading write-ahead log.
 */
private void applyLastUpdates(CheckpointStatus status, boolean metastoreOnly) throws IgniteCheckedException {
if (log.isInfoEnabled())
log.info("Applying lost cache updates since last checkpoint record [lastMarked=" + status.startPtr + ", lastCheckpointId=" + status.cpStartId + ']');
// Disable field lookup in queries while indexes may be inconsistent with replayed data.
if (!metastoreOnly)
cctx.kernalContext().query().skipFieldLookup(true);
long start = U.currentTimeMillis();
int applied = 0;
// Groups with WAL initially disabled must not be replayed.
Collection<Integer> ignoreGrps = metastoreOnly ? Collections.emptySet() : initiallyWalDisabledGrps;
try (WALIterator it = cctx.wal().replay(status.startPtr)) {
// (groupId, partitionId) -> (partition state, update counter), applied after replay.
Map<T2<Integer, Integer>, T2<Integer, Long>> partStates = new HashMap<>();
while (it.hasNextX()) {
IgniteBiTuple<WALPointer, WALRecord> next = it.nextX();
WALRecord rec = next.get2();
switch(rec.type()) {
case DATA_RECORD:
if (metastoreOnly)
continue;
// Re-apply each logical cache update contained in the data record.
DataRecord dataRec = (DataRecord) rec;
for (DataEntry dataEntry : dataRec.writeEntries()) {
int cacheId = dataEntry.cacheId();
int grpId = cctx.cache().cacheDescriptor(cacheId).groupId();
if (!ignoreGrps.contains(grpId)) {
GridCacheContext cacheCtx = cctx.cacheContext(cacheId);
applyUpdate(cacheCtx, dataEntry);
applied++;
}
}
break;
case PART_META_UPDATE_STATE:
if (metastoreOnly)
continue;
// Remember the latest partition state; actually restored below, after the loop.
PartitionMetaStateRecord metaStateRecord = (PartitionMetaStateRecord) rec;
if (!ignoreGrps.contains(metaStateRecord.groupId())) {
partStates.put(new T2<>(metaStateRecord.groupId(), metaStateRecord.partitionId()), new T2<>((int) metaStateRecord.state(), metaStateRecord.updateCounter()));
}
break;
case METASTORE_DATA_RECORD:
// Metastore updates are applied in both modes.
MetastoreDataRecord metastoreDataRecord = (MetastoreDataRecord) rec;
metaStorage.applyUpdate(metastoreDataRecord.key(), metastoreDataRecord.value());
break;
case META_PAGE_UPDATE_NEXT_SNAPSHOT_ID:
case META_PAGE_UPDATE_LAST_SUCCESSFUL_SNAPSHOT_ID:
case META_PAGE_UPDATE_LAST_SUCCESSFUL_FULL_SNAPSHOT_ID:
if (metastoreOnly)
continue;
// Snapshot-id deltas are applied directly to the page under a write lock.
PageDeltaRecord rec0 = (PageDeltaRecord) rec;
PageMemoryEx pageMem = getPageMemoryForCacheGroup(rec0.groupId());
long page = pageMem.acquirePage(rec0.groupId(), rec0.pageId(), true);
try {
long addr = pageMem.writeLock(rec0.groupId(), rec0.pageId(), page, true);
try {
rec0.applyDelta(pageMem, addr);
} finally {
pageMem.writeUnlock(rec0.groupId(), rec0.pageId(), page, null, true, true);
}
} finally {
pageMem.releasePage(rec0.groupId(), rec0.pageId(), page);
}
break;
default:
// All other record types are irrelevant for this recovery pass.
}
}
if (!metastoreOnly)
restorePartitionState(partStates, ignoreGrps);
} finally {
if (!metastoreOnly)
cctx.kernalContext().query().skipFieldLookup(false);
}
if (log.isInfoEnabled())
log.info("Finished applying WAL changes [updatesApplied=" + applied + ", time=" + (U.currentTimeMillis() - start) + "ms]");
}
Usage of org.apache.ignite.internal.util.typedef.T2 in the Apache Ignite project: class GridCacheDatabaseSharedManager, method nodeStartedPointers.
/**
 * Reads all node-started marker files from the checkpoint directory.
 *
 * @return List of (start timestamp, WAL pointer) pairs ordered by timestamp.
 * @throws IgniteCheckedException If a marker file could not be read.
 */
public List<T2<Long, WALPointer>> nodeStartedPointers() throws IgniteCheckedException {
    List<T2<Long, WALPointer>> res = new ArrayList<>();

    File[] files = cpDir.listFiles(NODE_STARTED_FILE_FILTER);

    // listFiles() returns null on I/O error or when the directory does not exist.
    if (files == null)
        return res;

    Arrays.sort(files, new Comparator<File>() {
        @Override public int compare(File o1, File o2) {
            String n1 = o1.getName();
            String n2 = o2.getName();

            // File name layout: <timestamp><NODE_STARTED_FILE_NAME_SUFFIX>.
            long ts1 = Long.parseLong(n1.substring(0, n1.length() - NODE_STARTED_FILE_NAME_SUFFIX.length()));
            long ts2 = Long.parseLong(n2.substring(0, n2.length() - NODE_STARTED_FILE_NAME_SUFFIX.length()));

            // BUG FIX: the original compared boxed Longs with '==' / '<', which is an
            // identity comparison for values outside the Long cache and could break ordering.
            return Long.compare(ts1, ts2);
        }
    });

    ByteBuffer buf = ByteBuffer.allocate(20);
    buf.order(ByteOrder.nativeOrder());

    for (File f : files) {
        String name = f.getName();

        Long ts = Long.valueOf(name.substring(0, name.length() - NODE_STARTED_FILE_NAME_SUFFIX.length()));

        try (FileChannel ch = FileChannel.open(f.toPath(), READ)) {
            ch.read(buf);

            buf.flip();

            // Marker payload: long + int + int (presumably WAL index, offset, length —
            // TODO confirm against the writer side).
            FileWALPointer ptr = new FileWALPointer(buf.getLong(), buf.getInt(), buf.getInt());

            res.add(new T2<Long, WALPointer>(ts, ptr));

            buf.clear();
        } catch (IOException e) {
            throw new IgniteCheckedException("Failed to read node started marker file: " + f.getAbsolutePath(), e);
        }
    }

    return res;
}
Usage of org.apache.ignite.internal.util.typedef.T2 in the Apache Ignite project: class GridCacheDatabaseSharedManager, method reserveHistoryForPreloading.
/**
 * {@inheritDoc}
 */
@Override public boolean reserveHistoryForPreloading(int grpId, int partId, long cntr) {
    // Find the checkpoint covering the requested update counter; no entry means
    // there is no history to reserve.
    CheckpointEntry cp = searchCheckpointEntry(grpId, partId, cntr);

    if (cp == null)
        return false;

    WALPointer cpMark = cp.cpMark;

    if (cpMark == null)
        return false;

    boolean ok = false;

    try {
        ok = cctx.wal().reserve(cpMark);
    }
    catch (IgniteCheckedException e) {
        // Reservation failure is non-fatal: log it and report that nothing was reserved.
        U.error(log, "Error while trying to reserve history", e);
    }

    // Track the successful reservation so it can be released after preloading.
    if (ok)
        reservedForPreloading.put(new T2<>(grpId, partId), new T2<>(cntr, cpMark));

    return ok;
}
End of aggregated T2 usage examples.