Search in sources :

Example 56 with T2

use of org.apache.ignite.internal.util.typedef.T2 in project ignite by apache.

From the class GridCacheMessage, method marshalTx.

/**
 * Marshals transaction entries and, where deployment info is needed,
 * registers their keys, values and entry processors for peer class loading.
 *
 * @param txEntries Entries to marshal ({@code null} makes this a no-op).
 * @param ctx Shared cache context.
 * @throws IgniteCheckedException If marshalling failed.
 */
protected final void marshalTx(Iterable<IgniteTxEntry> txEntries, GridCacheSharedContext ctx) throws IgniteCheckedException {
    assert ctx != null;

    if (txEntries == null)
        return;

    boolean transferExpiry = transferExpiryPolicy();
    boolean p2pEnabled = ctx.deploymentEnabled();

    for (IgniteTxEntry txEntry : txEntries) {
        txEntry.marshal(ctx, transferExpiry);

        GridCacheContext cacheCtx = txEntry.context();

        if (addDepInfo) {
            // Deployment info was explicitly requested: prepare key, value
            // and every attached entry processor.
            if (txEntry.key() != null)
                prepareObject(txEntry.key().value(cacheCtx.cacheObjectContext(), false), ctx);

            if (txEntry.value() != null)
                prepareObject(txEntry.value().value(cacheCtx.cacheObjectContext(), false), ctx);

            if (txEntry.entryProcessors() != null) {
                for (T2<EntryProcessor<Object, Object, Object>, Object[]> proc : txEntry.entryProcessors())
                    prepareObject(proc.get1(), ctx);
            }
        }
        else if (p2pEnabled && txEntry.entryProcessors() != null) {
            // P2P class loading with entry processors present: force deployment
            // info even though it was not requested up front.
            if (!forceAddDepInfo)
                forceAddDepInfo = true;

            for (T2<EntryProcessor<Object, Object, Object>, Object[]> proc : txEntry.entryProcessors())
                prepareObject(proc.get1(), ctx);
        }
    }
}
Also used : IgniteTxEntry(org.apache.ignite.internal.processors.cache.transactions.IgniteTxEntry) EntryProcessor(javax.cache.processor.EntryProcessor) T2(org.apache.ignite.internal.util.typedef.T2)

Example 57 with T2

use of org.apache.ignite.internal.util.typedef.T2 in project ignite by apache.

From the class GridLocalAtomicCache, method updateAllInternal.

/**
 * Entry point for all public update methods (put, remove, invoke).
 *
 * @param op Operation ({@code UPDATE}, {@code DELETE} or {@code TRANSFORM}).
 * @param keys Keys to update.
 * @param vals Values, positionally matched to {@code keys} ({@code null} for removes).
 * @param invokeArgs Optional arguments for {@link EntryProcessor} invocation.
 * @param expiryPlc Expiry policy.
 * @param retval Return value required flag.
 * @param rawRetval Return {@code GridCacheReturn} instance.
 * @param filter Cache entry filter.
 * @param writeThrough Write through flag.
 * @param readThrough Read through flag.
 * @param keepBinary Keep binary flag.
 * @return Update result (shape depends on {@code op}, {@code retval} and {@code rawRetval}).
 * @throws IgniteCheckedException If failed.
 */
@SuppressWarnings("unchecked")
private Object updateAllInternal(GridCacheOperation op, Collection<? extends K> keys, @Nullable Iterable<?> vals, @Nullable Object[] invokeArgs, @Nullable ExpiryPolicy expiryPlc, boolean retval, boolean rawRetval, CacheEntryPredicate filter, boolean writeThrough, boolean readThrough, boolean keepBinary) throws IgniteCheckedException {
    if (keyCheck)
        validateCacheKeys(keys);

    // Removes and writes are guarded by different security permissions.
    if (op == DELETE)
        ctx.checkSecurity(SecurityPermission.CACHE_REMOVE);
    else
        ctx.checkSecurity(SecurityPermission.CACHE_PUT);

    String taskName = ctx.kernalContext().job().currentTaskName();

    GridCacheVersion ver = ctx.versions().next();

    UUID subjId = ctx.subjectIdPerCall(null);

    CacheEntryPredicate[] filters = CU.filterArray(filter);

    IgniteBiTuple<Boolean, ?> res = null;

    CachePartialUpdateCheckedException err = null;

    ctx.shared().database().checkpointReadLock();

    try {
        ctx.shared().database().ensureFreeSpace(ctx.dataRegion());

        // Multi-key write-through updates go through the batching path.
        if (writeThrough && keys.size() > 1) {
            return updateWithBatch(op, keys, vals, invokeArgs, expiryPlc, ver, filters, keepBinary, subjId, taskName);
        }

        Iterator<?> valsIter = vals != null ? vals.iterator() : null;

        boolean intercept = ctx.config().getInterceptor() != null;

        for (K key : keys) {
            if (key == null)
                throw new NullPointerException("Null key.");

            Object val = valsIter != null ? valsIter.next() : null;

            if (val == null && op != DELETE)
                throw new NullPointerException("Null value.");

            KeyCacheObject cacheKey = ctx.toCacheKeyObject(key);

            if (op == UPDATE) {
                val = ctx.toCacheObject(val);

                ctx.validateKeyAndValue(cacheKey, (CacheObject) val);
            } else if (op == TRANSFORM)
                // Inject resources (e.g. @IgniteInstanceResource) into the entry processor.
                ctx.kernalContext().resource().inject(val, GridResourceIoc.AnnotationSet.ENTRY_PROCESSOR, ctx.name());

            // Retry loop: repeats only when the entry is concurrently removed.
            while (true) {
                GridCacheEntryEx entry = null;

                try {
                    entry = entryEx(cacheKey);

                    GridTuple3<Boolean, Object, EntryProcessorResult<Object>> t = entry.innerUpdateLocal(ver, val == null ? DELETE : op, val, invokeArgs, writeThrough, readThrough, retval, keepBinary, expiryPlc, true, true, filters, intercept, subjId, taskName);

                    if (op == TRANSFORM) {
                        // Accumulate per-key invoke results into a single map.
                        if (t.get3() != null) {
                            Map<K, EntryProcessorResult> computedMap;

                            if (res == null) {
                                computedMap = U.newHashMap(keys.size());

                                res = new IgniteBiTuple<>(true, computedMap);
                            } else
                                computedMap = (Map<K, EntryProcessorResult>) res.get2();

                            computedMap.put(key, t.get3());
                        }
                    } else if (res == null)
                        // Fix: parameterize T2 instead of constructing a raw type.
                        res = new T2<>(t.get1(), t.get2());

                    // While.
                    break;
                } catch (GridCacheEntryRemovedException ignored) {
                    if (log.isDebugEnabled())
                        log.debug("Got removed entry while updating (will retry): " + key);

                    // Prevent the finally block from touching a stale entry; loop retries.
                    entry = null;
                } catch (IgniteCheckedException e) {
                    // Collect per-key failures into a single partial-update exception.
                    if (err == null)
                        err = partialUpdateException();

                    err.add(F.asList(key), e);

                    U.error(log, "Failed to update key : " + key, e);

                    break;
                } finally {
                    if (entry != null)
                        ctx.evicts().touch(entry, ctx.affinity().affinityTopologyVersion());
                }
            }
        }
    } finally {
        ctx.shared().database().checkpointReadUnlock();
    }

    if (err != null)
        throw err;

    // Shape the return value: raw tuple, invoke results, value, or success flag.
    Object ret = res == null ? null : rawRetval ? new GridCacheReturn(ctx, true, keepBinary, res.get2(), res.get1()) : (retval || op == TRANSFORM) ? res.get2() : res.get1();

    if (op == TRANSFORM && ret == null)
        ret = Collections.emptyMap();

    return ret;
}
Also used : GridCacheReturn(org.apache.ignite.internal.processors.cache.GridCacheReturn) CachePartialUpdateCheckedException(org.apache.ignite.internal.processors.cache.CachePartialUpdateCheckedException) GridCacheVersion(org.apache.ignite.internal.processors.cache.version.GridCacheVersion) GridCacheEntryEx(org.apache.ignite.internal.processors.cache.GridCacheEntryEx) EntryProcessorResult(javax.cache.processor.EntryProcessorResult) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) CacheObject(org.apache.ignite.internal.processors.cache.CacheObject) KeyCacheObject(org.apache.ignite.internal.processors.cache.KeyCacheObject) GridCacheEntryRemovedException(org.apache.ignite.internal.processors.cache.GridCacheEntryRemovedException) CacheEntryPredicate(org.apache.ignite.internal.processors.cache.CacheEntryPredicate) UUID(java.util.UUID) Map(java.util.Map) LinkedHashMap(java.util.LinkedHashMap) T2(org.apache.ignite.internal.util.typedef.T2) KeyCacheObject(org.apache.ignite.internal.processors.cache.KeyCacheObject)

Example 58 with T2

use of org.apache.ignite.internal.util.typedef.T2 in project ignite by apache.

From the class GridCacheDatabaseSharedManager, method applyLastUpdates.

/**
 * Replays WAL records written after the last checkpoint marker and re-applies
 * the lost updates (data entries, partition states, metastore writes and
 * selected page deltas).
 *
 * @param status Last registered checkpoint status; replay starts at {@code status.startPtr}.
 * @param metastoreOnly If {@code true}, only METASTORE_DATA_RECORD records are applied;
 *      all cache data/partition/page records are skipped.
 * @throws IgniteCheckedException If failed to apply updates.
 * @throws StorageException If IO exception occurred while reading write-ahead log.
 */
private void applyLastUpdates(CheckpointStatus status, boolean metastoreOnly) throws IgniteCheckedException {
    if (log.isInfoEnabled())
        log.info("Applying lost cache updates since last checkpoint record [lastMarked=" + status.startPtr + ", lastCheckpointId=" + status.cpStartId + ']');
    // Field lookup is suspended while replaying so query processing does not
    // observe half-restored data.
    if (!metastoreOnly)
        cctx.kernalContext().query().skipFieldLookup(true);
    long start = U.currentTimeMillis();
    int applied = 0;
    // Groups with WAL initially disabled are skipped entirely (cache mode only).
    Collection<Integer> ignoreGrps = metastoreOnly ? Collections.emptySet() : initiallyWalDisabledGrps;
    try (WALIterator it = cctx.wal().replay(status.startPtr)) {
        // (groupId, partitionId) -> (partition state ordinal, update counter);
        // collected here and applied in bulk after replay finishes.
        Map<T2<Integer, Integer>, T2<Integer, Long>> partStates = new HashMap<>();
        while (it.hasNextX()) {
            IgniteBiTuple<WALPointer, WALRecord> next = it.nextX();
            WALRecord rec = next.get2();
            // NOTE: 'continue' inside this switch resumes the enclosing while
            // loop, i.e. skips the current record entirely.
            switch(rec.type()) {
                case DATA_RECORD:
                    if (metastoreOnly)
                        continue;
                    DataRecord dataRec = (DataRecord) rec;
                    for (DataEntry dataEntry : dataRec.writeEntries()) {
                        int cacheId = dataEntry.cacheId();
                        int grpId = cctx.cache().cacheDescriptor(cacheId).groupId();
                        if (!ignoreGrps.contains(grpId)) {
                            GridCacheContext cacheCtx = cctx.cacheContext(cacheId);
                            applyUpdate(cacheCtx, dataEntry);
                            applied++;
                        }
                    }
                    break;
                case PART_META_UPDATE_STATE:
                    if (metastoreOnly)
                        continue;
                    PartitionMetaStateRecord metaStateRecord = (PartitionMetaStateRecord) rec;
                    if (!ignoreGrps.contains(metaStateRecord.groupId())) {
                        // Later records for the same (group, partition) overwrite
                        // earlier ones, so only the final state survives.
                        partStates.put(new T2<>(metaStateRecord.groupId(), metaStateRecord.partitionId()), new T2<>((int) metaStateRecord.state(), metaStateRecord.updateCounter()));
                    }
                    break;
                case METASTORE_DATA_RECORD:
                    // Metastore updates are applied in both modes.
                    MetastoreDataRecord metastoreDataRecord = (MetastoreDataRecord) rec;
                    metaStorage.applyUpdate(metastoreDataRecord.key(), metastoreDataRecord.value());
                    break;
                case META_PAGE_UPDATE_NEXT_SNAPSHOT_ID:
                case META_PAGE_UPDATE_LAST_SUCCESSFUL_SNAPSHOT_ID:
                case META_PAGE_UPDATE_LAST_SUCCESSFUL_FULL_SNAPSHOT_ID:
                    if (metastoreOnly)
                        continue;
                    // Snapshot-id deltas are re-applied directly to the page
                    // under a page write lock (acquire -> writeLock -> apply ->
                    // writeUnlock -> release).
                    PageDeltaRecord rec0 = (PageDeltaRecord) rec;
                    PageMemoryEx pageMem = getPageMemoryForCacheGroup(rec0.groupId());
                    long page = pageMem.acquirePage(rec0.groupId(), rec0.pageId(), true);
                    try {
                        long addr = pageMem.writeLock(rec0.groupId(), rec0.pageId(), page, true);
                        try {
                            rec0.applyDelta(pageMem, addr);
                        } finally {
                            pageMem.writeUnlock(rec0.groupId(), rec0.pageId(), page, null, true, true);
                        }
                    } finally {
                        pageMem.releasePage(rec0.groupId(), rec0.pageId(), page);
                    }
                    break;
                default:
                    // All other record types are ignored during this replay.
            }
        }
        if (!metastoreOnly)
            restorePartitionState(partStates, ignoreGrps);
    } finally {
        if (!metastoreOnly)
            cctx.kernalContext().query().skipFieldLookup(false);
    }
    if (log.isInfoEnabled())
        log.info("Finished applying WAL changes [updatesApplied=" + applied + ", time=" + (U.currentTimeMillis() - start) + "ms]");
}
Also used : WALRecord(org.apache.ignite.internal.pagemem.wal.record.WALRecord) GridCacheContext(org.apache.ignite.internal.processors.cache.GridCacheContext) ConcurrentLinkedHashMap(org.jsr166.ConcurrentLinkedHashMap) ConcurrentHashMap(java.util.concurrent.ConcurrentHashMap) HashMap(java.util.HashMap) PartitionMetaStateRecord(org.apache.ignite.internal.pagemem.wal.record.delta.PartitionMetaStateRecord) PageDeltaRecord(org.apache.ignite.internal.pagemem.wal.record.delta.PageDeltaRecord) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) DataEntry(org.apache.ignite.internal.pagemem.wal.record.DataEntry) WALIterator(org.apache.ignite.internal.pagemem.wal.WALIterator) MetastoreDataRecord(org.apache.ignite.internal.pagemem.wal.record.MetastoreDataRecord) PageMemoryEx(org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx) DataRecord(org.apache.ignite.internal.pagemem.wal.record.DataRecord) MetastoreDataRecord(org.apache.ignite.internal.pagemem.wal.record.MetastoreDataRecord) WALPointer(org.apache.ignite.internal.pagemem.wal.WALPointer) FileWALPointer(org.apache.ignite.internal.processors.cache.persistence.wal.FileWALPointer) T2(org.apache.ignite.internal.util.typedef.T2)

Example 59 with T2

use of org.apache.ignite.internal.util.typedef.T2 in project ignite by apache.

From the class GridCacheDatabaseSharedManager, method nodeStartedPointers.

/**
 * Reads all node-started marker files from the checkpoint directory, ordered by
 * the timestamp encoded in each file name, and returns the (timestamp, WAL pointer)
 * pairs stored in them.
 *
 * @return List of (startup timestamp, WAL pointer) tuples, in ascending timestamp
 *      order; empty if the directory has no marker files or cannot be listed.
 * @throws IgniteCheckedException If a marker file cannot be read.
 */
public List<T2<Long, WALPointer>> nodeStartedPointers() throws IgniteCheckedException {
    List<T2<Long, WALPointer>> res = new ArrayList<>();

    File[] files = cpDir.listFiles(NODE_STARTED_FILE_FILTER);

    // Fix: listFiles() returns null when the directory does not exist or an I/O
    // error occurs; the original code would NPE in Arrays.sort.
    if (files == null)
        return res;

    Arrays.sort(files, new Comparator<File>() {

        @Override
        public int compare(File o1, File o2) {
            String n1 = o1.getName();
            String n2 = o2.getName();

            long ts1 = Long.parseLong(n1.substring(0, n1.length() - NODE_STARTED_FILE_NAME_SUFFIX.length()));
            long ts2 = Long.parseLong(n2.substring(0, n2.length() - NODE_STARTED_FILE_NAME_SUFFIX.length()));

            // Fix: the original compared boxed Longs with '==', which is reference
            // equality and only holds for values cached in [-128, 127] — broken
            // for real timestamps. Compare primitive values instead.
            return Long.compare(ts1, ts2);
        }
    });

    // Marker layout: 8-byte WAL index + 4-byte file offset + 4-byte length = 16
    // bytes of payload; buffer is sized 20 as in the writer.
    ByteBuffer buf = ByteBuffer.allocate(20);
    buf.order(ByteOrder.nativeOrder());

    for (File f : files) {
        String name = f.getName();

        Long ts = Long.valueOf(name.substring(0, name.length() - NODE_STARTED_FILE_NAME_SUFFIX.length()));

        try (FileChannel ch = FileChannel.open(f.toPath(), READ)) {
            ch.read(buf);

            buf.flip();

            FileWALPointer ptr = new FileWALPointer(buf.getLong(), buf.getInt(), buf.getInt());

            res.add(new T2<Long, WALPointer>(ts, ptr));

            buf.clear();
        } catch (IOException e) {
            throw new IgniteCheckedException("Failed to read node started marker file: " + f.getAbsolutePath(), e);
        }
    }

    return res;
}
Also used : FileWALPointer(org.apache.ignite.internal.processors.cache.persistence.wal.FileWALPointer) FileChannel(java.nio.channels.FileChannel) CopyOnWriteArrayList(java.util.concurrent.CopyOnWriteArrayList) ArrayList(java.util.ArrayList) IOException(java.io.IOException) ByteBuffer(java.nio.ByteBuffer) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) RandomAccessFile(java.io.RandomAccessFile) File(java.io.File) WALPointer(org.apache.ignite.internal.pagemem.wal.WALPointer) FileWALPointer(org.apache.ignite.internal.processors.cache.persistence.wal.FileWALPointer) T2(org.apache.ignite.internal.util.typedef.T2)

Example 60 with T2

use of org.apache.ignite.internal.util.typedef.T2 in project ignite by apache.

From the class GridCacheDatabaseSharedManager, method reserveHistoryForPreloading.

/**
 * {@inheritDoc}
 *
 * Looks up the checkpoint covering the given update counter and, if its WAL
 * mark exists and can be reserved, records the reservation for preloading.
 */
@Override
public boolean reserveHistoryForPreloading(int grpId, int partId, long cntr) {
    CheckpointEntry cpEntry = searchCheckpointEntry(grpId, partId, cntr);

    // No suitable checkpoint, or checkpoint without a WAL mark: nothing to reserve.
    WALPointer mark = cpEntry == null ? null : cpEntry.cpMark;

    if (mark == null)
        return false;

    boolean reserved = false;

    try {
        reserved = cctx.wal().reserve(mark);
    }
    catch (IgniteCheckedException e) {
        U.error(log, "Error while trying to reserve history", e);
    }

    if (reserved)
        reservedForPreloading.put(new T2<>(grpId, partId), new T2<>(cntr, mark));

    return reserved;
}
Also used : IgniteCheckedException(org.apache.ignite.IgniteCheckedException) WALPointer(org.apache.ignite.internal.pagemem.wal.WALPointer) FileWALPointer(org.apache.ignite.internal.processors.cache.persistence.wal.FileWALPointer) T2(org.apache.ignite.internal.util.typedef.T2)

Aggregations

T2 (org.apache.ignite.internal.util.typedef.T2)83 ArrayList (java.util.ArrayList)33 HashMap (java.util.HashMap)31 ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap)25 AtomicInteger (java.util.concurrent.atomic.AtomicInteger)25 IgniteCheckedException (org.apache.ignite.IgniteCheckedException)22 Map (java.util.Map)17 Ignite (org.apache.ignite.Ignite)15 UUID (java.util.UUID)14 ClusterNode (org.apache.ignite.cluster.ClusterNode)14 ContinuousQuery (org.apache.ignite.cache.query.ContinuousQuery)13 ConcurrentMap (java.util.concurrent.ConcurrentMap)12 HashSet (java.util.HashSet)11 List (java.util.List)11 GridCacheContext (org.apache.ignite.internal.processors.cache.GridCacheContext)9 CopyOnWriteArrayList (java.util.concurrent.CopyOnWriteArrayList)8 CacheEntryEvent (javax.cache.event.CacheEntryEvent)8 AffinityTopologyVersion (org.apache.ignite.internal.processors.affinity.AffinityTopologyVersion)8 CacheException (javax.cache.CacheException)7 CacheConfiguration (org.apache.ignite.configuration.CacheConfiguration)7