
Example 16 with DataRegion

use of org.apache.ignite.internal.processors.cache.persistence.DataRegion in project ignite by apache.

the class ValidationOnNodeJoinUtils method validate.

/**
 * @param c Ignite configuration.
 * @param cc Cache configuration to validate.
 * @param cacheType Cache type.
 * @param cfgStore Cache store.
 * @param ctx Kernal context.
 * @param log Logger.
 * @param assertParam Assertion function producing an IgniteCheckedException for a failed parameter check.
 * @throws IgniteCheckedException If validation failed.
 */
static void validate(IgniteConfiguration c, CacheConfiguration cc, CacheType cacheType, @Nullable CacheStore cfgStore, GridKernalContext ctx, IgniteLogger log, BiFunction<Boolean, String, IgniteCheckedException> assertParam) throws IgniteCheckedException {
    apply(assertParam, cc.getName() != null && !cc.getName().isEmpty(), "name is null or empty");
    if (cc.getCacheMode() == REPLICATED) {
        if (cc.getNearConfiguration() != null && ctx.discovery().cacheAffinityNode(ctx.discovery().localNode(), cc.getName())) {
            U.warn(log, "Near cache cannot be used with REPLICATED cache, " + "will be ignored [cacheName=" + U.maskName(cc.getName()) + ']');
            cc.setNearConfiguration(null);
        }
    }
    if (storesLocallyOnClient(c, cc, ctx))
        throw new IgniteCheckedException("DataRegion for client caches must be explicitly configured " + "on client node startup. Use DataStorageConfiguration to configure DataRegion.");
    if (cc.getCacheMode() == LOCAL && !cc.getAffinity().getClass().equals(LocalAffinityFunction.class))
        U.warn(log, "AffinityFunction configuration parameter will be ignored for local cache [cacheName=" + U.maskName(cc.getName()) + ']');
    if (cc.getAffinity().partitions() > CacheConfiguration.MAX_PARTITIONS_COUNT)
        throw new IgniteCheckedException("Cannot have more than " + CacheConfiguration.MAX_PARTITIONS_COUNT + " partitions [cacheName=" + cc.getName() + ", partitions=" + cc.getAffinity().partitions() + ']');
    if (cc.getRebalanceMode() != CacheRebalanceMode.NONE) {
        apply(assertParam, cc.getRebalanceBatchSize() > 0, "rebalanceBatchSize > 0");
        apply(assertParam, cc.getRebalanceTimeout() >= 0, "rebalanceTimeout >= 0");
        apply(assertParam, cc.getRebalanceThrottle() >= 0, "rebalanceThrottle >= 0");
        apply(assertParam, cc.getRebalanceBatchesPrefetchCount() > 0, "rebalanceBatchesPrefetchCount > 0");
    }
    if (cc.getCacheMode() == PARTITIONED || cc.getCacheMode() == REPLICATED) {
        if (cc.getAtomicityMode() == ATOMIC && cc.getWriteSynchronizationMode() == FULL_ASYNC)
            U.warn(log, "Cache write synchronization mode is set to FULL_ASYNC. All single-key 'put' and " + "'remove' operations will return 'null', all 'putx' and 'removex' operations will return" + " 'true' [cacheName=" + U.maskName(cc.getName()) + ']');
    }
    DeploymentMode depMode = c.getDeploymentMode();
    if (c.isPeerClassLoadingEnabled() && (depMode == PRIVATE || depMode == ISOLATED) && !CU.isSystemCache(cc.getName()) && !(c.getMarshaller() instanceof BinaryMarshaller))
        throw new IgniteCheckedException("Cache can be started in PRIVATE or ISOLATED deployment mode only when" + " BinaryMarshaller is used [depMode=" + ctx.config().getDeploymentMode() + ", marshaller=" + c.getMarshaller().getClass().getName() + ']');
    if (cc.getAffinity().partitions() > CacheConfiguration.MAX_PARTITIONS_COUNT)
        throw new IgniteCheckedException("Affinity function must return at most " + CacheConfiguration.MAX_PARTITIONS_COUNT + " partitions [actual=" + cc.getAffinity().partitions() + ", affFunction=" + cc.getAffinity() + ", cacheName=" + cc.getName() + ']');
    if (cc.getAtomicityMode() == TRANSACTIONAL_SNAPSHOT) {
        apply(assertParam, cc.getCacheMode() != LOCAL, "LOCAL cache mode cannot be used with TRANSACTIONAL_SNAPSHOT atomicity mode");
        apply(assertParam, cc.getNearConfiguration() == null, "near cache cannot be used with TRANSACTIONAL_SNAPSHOT atomicity mode");
        apply(assertParam, !cc.isReadThrough(), "readThrough cannot be used with TRANSACTIONAL_SNAPSHOT atomicity mode");
        apply(assertParam, !cc.isWriteThrough(), "writeThrough cannot be used with TRANSACTIONAL_SNAPSHOT atomicity mode");
        apply(assertParam, !cc.isWriteBehindEnabled(), "writeBehindEnabled cannot be used with TRANSACTIONAL_SNAPSHOT atomicity mode");
        apply(assertParam, cc.getRebalanceMode() != NONE, "Rebalance mode NONE cannot be used with TRANSACTIONAL_SNAPSHOT atomicity mode");
        ExpiryPolicy expPlc = null;
        if (cc.getExpiryPolicyFactory() instanceof FactoryBuilder.SingletonFactory)
            expPlc = (ExpiryPolicy) cc.getExpiryPolicyFactory().create();
        if (!(expPlc instanceof EternalExpiryPolicy)) {
            apply(assertParam, cc.getExpiryPolicyFactory() == null, "expiry policy cannot be used with TRANSACTIONAL_SNAPSHOT atomicity mode");
        }
        apply(assertParam, cc.getInterceptor() == null, "interceptor cannot be used with TRANSACTIONAL_SNAPSHOT atomicity mode");
        // Disable in-memory evictions for mvcc cache. TODO IGNITE-10738
        String memPlcName = cc.getDataRegionName();
        DataRegion dataRegion = ctx.cache().context().database().dataRegion(memPlcName);
        if (dataRegion != null && !dataRegion.config().isPersistenceEnabled() && dataRegion.config().getPageEvictionMode() != DataPageEvictionMode.DISABLED) {
            throw new IgniteCheckedException("Data pages evictions cannot be used with TRANSACTIONAL_SNAPSHOT " + "cache atomicity mode for in-memory regions. Please, either disable evictions or enable " + "persistence for data regions with TRANSACTIONAL_SNAPSHOT caches. [cacheName=" + cc.getName() + ", dataRegionName=" + memPlcName + ", pageEvictionMode=" + dataRegion.config().getPageEvictionMode() + ']');
        }
        IndexingSpi idxSpi = ctx.config().getIndexingSpi();
        apply(assertParam, idxSpi == null || idxSpi instanceof NoopIndexingSpi, "Custom IndexingSpi cannot be used with TRANSACTIONAL_SNAPSHOT atomicity mode");
    }
    // This method can be called while memory recovery is in progress,
    // which means that the GridDiscoveryManager is not started and localNode is therefore not initialized.
    ClusterNode locNode = ctx.discovery().localNode() != null ? ctx.discovery().localNode() : new DetachedClusterNode(ctx.pdsFolderResolver().resolveFolders().consistentId(), ctx.nodeAttributes());
    if (cc.isWriteBehindEnabled() && ctx.discovery().cacheAffinityNode(locNode, cc.getName())) {
        if (cfgStore == null)
            throw new IgniteCheckedException("Cannot enable write-behind (writer or store is not provided) " + "for cache: " + U.maskName(cc.getName()));
        apply(assertParam, cc.getWriteBehindBatchSize() > 0, "writeBehindBatchSize > 0");
        apply(assertParam, cc.getWriteBehindFlushSize() >= 0, "writeBehindFlushSize >= 0");
        apply(assertParam, cc.getWriteBehindFlushFrequency() >= 0, "writeBehindFlushFrequency >= 0");
        apply(assertParam, cc.getWriteBehindFlushThreadCount() > 0, "writeBehindFlushThreadCount > 0");
        if (cc.getWriteBehindFlushSize() == 0 && cc.getWriteBehindFlushFrequency() == 0)
            throw new IgniteCheckedException("Cannot set both 'writeBehindFlushFrequency' and " + "'writeBehindFlushSize' parameters to 0 for cache: " + U.maskName(cc.getName()));
    }
    if (cc.isReadThrough() && cfgStore == null && ctx.discovery().cacheAffinityNode(locNode, cc.getName()))
        throw new IgniteCheckedException("Cannot enable read-through (loader or store is not provided) " + "for cache: " + U.maskName(cc.getName()));
    if (cc.isWriteThrough() && cfgStore == null && ctx.discovery().cacheAffinityNode(locNode, cc.getName()))
        throw new IgniteCheckedException("Cannot enable write-through (writer or store is not provided) " + "for cache: " + U.maskName(cc.getName()));
    long delay = cc.getRebalanceDelay();
    if (delay != 0) {
        if (cc.getCacheMode() != PARTITIONED)
            U.warn(log, "Rebalance delay is supported only for partitioned caches (will ignore): " + (cc.getName()));
        else if (cc.getRebalanceMode() == SYNC) {
            if (delay < 0) {
                U.warn(log, "Ignoring SYNC rebalance mode with manual rebalance start (node will not wait for " + "rebalancing to be finished): " + U.maskName(cc.getName()));
            } else {
                U.warn(log, "Using SYNC rebalance mode with rebalance delay (node will wait until rebalancing is " + "initiated for " + delay + "ms) for cache: " + U.maskName(cc.getName()));
            }
        }
    }
    ctx.coordinators().validateCacheConfiguration(cc);
    if (cc.getAtomicityMode() == ATOMIC)
        apply(assertParam, cc.getTransactionManagerLookupClassName() == null, "transaction manager cannot be used with ATOMIC cache");
    if ((cc.getEvictionPolicyFactory() != null || cc.getEvictionPolicy() != null) && !cc.isOnheapCacheEnabled())
        throw new IgniteCheckedException("Onheap cache must be enabled if eviction policy is configured [cacheName=" + U.maskName(cc.getName()) + "]");
    if (cacheType != CacheType.DATA_STRUCTURES && DataStructuresProcessor.isDataStructureCache(cc.getName()))
        throw new IgniteCheckedException("Using cache names reserved for datastructures is not allowed for " + "other cache types [cacheName=" + cc.getName() + ", cacheType=" + cacheType + "]");
    if (cacheType != CacheType.DATA_STRUCTURES && DataStructuresProcessor.isReservedGroup(cc.getGroupName()))
        throw new IgniteCheckedException("Using cache group names reserved for datastructures is not allowed for " + "other cache types [cacheName=" + cc.getName() + ", groupName=" + cc.getGroupName() + ", cacheType=" + cacheType + "]");
    // Make sure we do not use sql schema for system views.
    if (ctx.query().moduleEnabled()) {
        String schema = QueryUtils.normalizeSchemaName(cc.getName(), cc.getSqlSchema());
        if (F.eq(schema, QueryUtils.SCHEMA_SYS)) {
            if (cc.getSqlSchema() == null) {
                // Conflict on cache name.
                throw new IgniteCheckedException("SQL schema name derived from cache name is reserved (" + "please set explicit SQL schema name through CacheConfiguration.setSqlSchema() or choose " + "another cache name) [cacheName=" + cc.getName() + ", schemaName=" + cc.getSqlSchema() + "]");
            } else {
                // Conflict on schema name.
                throw new IgniteCheckedException("SQL schema name is reserved (please choose another one) [" + "cacheName=" + cc.getName() + ", schemaName=" + cc.getSqlSchema() + ']');
            }
        }
    }
    if (cc.isEncryptionEnabled() && !ctx.clientNode()) {
        StringBuilder cacheSpec = new StringBuilder("[cacheName=").append(cc.getName()).append(", groupName=").append(cc.getGroupName()).append(", cacheType=").append(cacheType).append(']');
        if (!CU.isPersistentCache(cc, c.getDataStorageConfiguration())) {
            throw new IgniteCheckedException("Using encryption is not allowed" + " for not persistent cache " + cacheSpec.toString());
        }
        EncryptionSpi encSpi = c.getEncryptionSpi();
        if (encSpi == null) {
            throw new IgniteCheckedException("EncryptionSpi should be configured to use encrypted cache " + cacheSpec.toString());
        }
        if (cc.getDiskPageCompression() != DiskPageCompression.DISABLED)
            throw new IgniteCheckedException("Encryption cannot be used with disk page compression " + cacheSpec.toString());
    }
    Collection<QueryEntity> ents = cc.getQueryEntities();
    if (ctx.discovery().discoCache() != null) {
        boolean nonDfltPrecScaleExists = ents.stream().anyMatch(e -> !F.isEmpty(e.getFieldsPrecision()) || !F.isEmpty(e.getFieldsScale()));
        if (nonDfltPrecScaleExists) {
            ClusterNode oldestNode = ctx.discovery().discoCache().oldestServerNode();
            if (PRECISION_SCALE_SINCE_VER.compareTo(oldestNode.version()) > 0) {
                throw new IgniteCheckedException("Non default precision and scale is supported since version 2.7. " + "The node with oldest version [node=" + oldestNode + ']');
            }
        }
    }
}
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) DetachedClusterNode(org.apache.ignite.internal.cluster.DetachedClusterNode) NoopIndexingSpi(org.apache.ignite.spi.indexing.noop.NoopIndexingSpi) IndexingSpi(org.apache.ignite.spi.indexing.IndexingSpi) BinaryMarshaller(org.apache.ignite.internal.binary.BinaryMarshaller) EternalExpiryPolicy(javax.cache.expiry.EternalExpiryPolicy) ExpiryPolicy(javax.cache.expiry.ExpiryPolicy) QueryEntity(org.apache.ignite.cache.QueryEntity) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) DeploymentMode(org.apache.ignite.configuration.DeploymentMode) DataRegion(org.apache.ignite.internal.processors.cache.persistence.DataRegion) EncryptionSpi(org.apache.ignite.spi.encryption.EncryptionSpi)
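
For context, the two DataRegion-related checks above (the explicit client region requirement and the page-eviction restriction for TRANSACTIONAL_SNAPSHOT caches) are driven by the node's DataStorageConfiguration. A minimal sketch of a configuration that satisfies both; the region and cache names ("txRegion", "txCache") are hypothetical:

import org.apache.ignite.configuration.CacheConfiguration;
import org.apache.ignite.configuration.DataPageEvictionMode;
import org.apache.ignite.configuration.DataRegionConfiguration;
import org.apache.ignite.configuration.DataStorageConfiguration;
import org.apache.ignite.configuration.IgniteConfiguration;

public class ExplicitRegionConfig {
    // "txRegion" and "txCache" are illustrative names, not Ignite defaults.
    public static IgniteConfiguration build() {
        DataRegionConfiguration region = new DataRegionConfiguration()
            .setName("txRegion")
            // Persistence enabled (or eviction left DISABLED) satisfies the
            // TRANSACTIONAL_SNAPSHOT page-eviction check in validate().
            .setPersistenceEnabled(true)
            .setPageEvictionMode(DataPageEvictionMode.DISABLED);

        CacheConfiguration<Integer, String> cacheCfg =
            new CacheConfiguration<Integer, String>("txCache")
                // Binding the cache to an explicitly configured region also
                // satisfies the storesLocallyOnClient() check on client startup.
                .setDataRegionName("txRegion");

        return new IgniteConfiguration()
            .setDataStorageConfiguration(
                new DataStorageConfiguration().setDataRegionConfigurations(region))
            .setCacheConfiguration(cacheCfg);
    }
}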

Example 17 with DataRegion

use of org.apache.ignite.internal.processors.cache.persistence.DataRegion in project ignite by apache.

the class MvccProcessorImpl method txLogPageStoreInit.

/**
 * @param mgr Database shared manager.
 * @throws IgniteCheckedException If failed.
 */
private void txLogPageStoreInit(IgniteCacheDatabaseSharedManager mgr) throws IgniteCheckedException {
    assert CU.isPersistenceEnabled(ctx.config());
    DataRegion dataRegion = mgr.dataRegion(TX_LOG_CACHE_NAME);
    PageMetrics pageMetrics = dataRegion.metrics().cacheGrpPageMetrics(TX_LOG_CACHE_ID);
    // noinspection ConstantConditions
    ctx.cache().context().pageStore().initialize(TX_LOG_CACHE_ID, 0, TX_LOG_CACHE_NAME, pageMetrics);
}
Also used : PageMetrics(org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMetrics) DataRegion(org.apache.ignite.internal.processors.cache.persistence.DataRegion)
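
The PageMetrics lookup above goes through internal processor APIs; applications normally read per-region page statistics through the public DataRegionMetrics interface instead. A minimal sketch, assuming a started node referenced by the variable ignite:

import org.apache.ignite.DataRegionMetrics;
import org.apache.ignite.Ignite;

public class RegionMetricsDump {
    // Prints basic page statistics for every configured data region.
    public static void dump(Ignite ignite) {
        for (DataRegionMetrics m : ignite.dataRegionMetrics()) {
            System.out.println("region=" + m.getName()
                + ", allocatedPages=" + m.getTotalAllocatedPages()
                + ", fillFactor=" + m.getPagesFillFactor());
        }
    }
}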

Example 18 with DataRegion

use of org.apache.ignite.internal.processors.cache.persistence.DataRegion in project ignite by apache.

the class CheckpointWorkflow method markCheckpointBegin.

/**
 * First stage of a checkpoint: collects the required information (mostly dirty pages).
 *
 * @param cpTs Checkpoint start timestamp.
 * @param curr Current checkpoint event info.
 * @param tracker Checkpoint metrics tracker.
 * @param workProgressDispatcher Work progress dispatcher.
 * @return Collected checkpoint info.
 * @throws IgniteCheckedException If failed.
 */
public Checkpoint markCheckpointBegin(long cpTs, CheckpointProgressImpl curr, CheckpointMetricsTracker tracker, WorkProgressDispatcher workProgressDispatcher) throws IgniteCheckedException {
    Collection<DataRegion> checkpointedRegions = dataRegions.get();
    List<CheckpointListener> dbLsnrs = getRelevantCheckpointListeners(checkpointedRegions);
    CheckpointRecord cpRec = new CheckpointRecord(memoryRecoveryRecordPtr);
    memoryRecoveryRecordPtr = null;
    IgniteFuture snapFut = null;
    CheckpointPagesInfoHolder cpPagesHolder;
    int dirtyPagesCount;
    boolean hasPartitionsToDestroy;
    WALPointer cpPtr = null;
    CheckpointContextImpl ctx0 = new CheckpointContextImpl(curr, new PartitionAllocationMap(), checkpointCollectPagesInfoPool, workProgressDispatcher);
    checkpointReadWriteLock.readLock();
    try {
        for (CheckpointListener lsnr : dbLsnrs) lsnr.beforeCheckpointBegin(ctx0);
        ctx0.awaitPendingTasksFinished();
    } finally {
        checkpointReadWriteLock.readUnlock();
    }
    tracker.onLockWaitStart();
    checkpointReadWriteLock.writeLock();
    try {
        curr.transitTo(LOCK_TAKEN);
        tracker.onMarkStart();
        // Listeners must be invoked before we write checkpoint record to WAL.
        for (CheckpointListener lsnr : dbLsnrs) lsnr.onMarkCheckpointBegin(ctx0);
        ctx0.awaitPendingTasksFinished();
        tracker.onListenersExecuteEnd();
        if (curr.nextSnapshot())
            snapFut = snapshotMgr.onMarkCheckPointBegin(curr.snapshotOperation(), ctx0.partitionStatMap());
        fillCacheGroupState(cpRec);
        // Page replacement is allowed only after the checkpoint entry has been stored to disk.
        cpPagesHolder = beginAllCheckpoints(checkpointedRegions, curr.futureFor(MARKER_STORED_TO_DISK));
        curr.currentCheckpointPagesCount(cpPagesHolder.pagesNum());
        dirtyPagesCount = cpPagesHolder.pagesNum();
        hasPartitionsToDestroy = !curr.getDestroyQueue().pendingReqs().isEmpty();
        if (dirtyPagesCount > 0 || curr.nextSnapshot() || hasPartitionsToDestroy) {
            // No page updates for this checkpoint are allowed from now on.
            if (wal != null)
                cpPtr = wal.log(cpRec);
            if (cpPtr == null)
                cpPtr = CheckpointStatus.NULL_PTR;
        }
        curr.transitTo(PAGE_SNAPSHOT_TAKEN);
    } finally {
        checkpointReadWriteLock.writeUnlock();
        tracker.onLockRelease();
    }
    curr.transitTo(LOCK_RELEASED);
    for (CheckpointListener lsnr : dbLsnrs) lsnr.onCheckpointBegin(ctx0);
    if (snapFut != null) {
        try {
            snapFut.get();
        } catch (IgniteException e) {
            U.error(log, "Failed to wait for snapshot operation initialization: " + curr.snapshotOperation(), e);
        }
    }
    if (dirtyPagesCount > 0 || hasPartitionsToDestroy) {
        tracker.onWalCpRecordFsyncStart();
        // Sync log outside the checkpoint write lock.
        if (wal != null)
            wal.flush(cpPtr, true);
        tracker.onWalCpRecordFsyncEnd();
        CheckpointEntry checkpointEntry = null;
        if (checkpointMarkersStorage != null)
            checkpointEntry = checkpointMarkersStorage.writeCheckpointEntry(cpTs, cpRec.checkpointId(), cpPtr, cpRec, CheckpointEntryType.START, skipSync);
        curr.transitTo(MARKER_STORED_TO_DISK);
        tracker.onSplitAndSortCpPagesStart();
        GridConcurrentMultiPairQueue<PageMemoryEx, FullPageId> cpPages = splitAndSortCpPagesIfNeeded(cpPagesHolder);
        tracker.onSplitAndSortCpPagesEnd();
        return new Checkpoint(checkpointEntry, cpPages, curr);
    } else {
        if (curr.nextSnapshot() && wal != null)
            wal.flush(null, true);
        return new Checkpoint(null, GridConcurrentMultiPairQueue.EMPTY, curr);
    }
}
Also used : IgniteFuture(org.apache.ignite.lang.IgniteFuture) CheckpointRecord(org.apache.ignite.internal.pagemem.wal.record.CheckpointRecord) PartitionAllocationMap(org.apache.ignite.internal.processors.cache.persistence.partstate.PartitionAllocationMap) IgniteException(org.apache.ignite.IgniteException) PageMemoryEx(org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx) WALPointer(org.apache.ignite.internal.processors.cache.persistence.wal.WALPointer) DataRegion(org.apache.ignite.internal.processors.cache.persistence.DataRegion) FullPageId(org.apache.ignite.internal.pagemem.FullPageId)
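
The method above follows a two-phase locking pattern: listeners that may still update pages run under the checkpoint read lock, while dirty-page collection and the WAL record happen under the write lock. A simplified sketch of that pattern using a plain ReentrantReadWriteLock in place of Ignite's CheckpointReadWriteLock; all names here are illustrative, not Ignite API:

import java.util.concurrent.locks.ReentrantReadWriteLock;

public class CheckpointLockPattern {
    private final ReentrantReadWriteLock lock = new ReentrantReadWriteLock();

    public void markCheckpointBegin(Runnable beforeBegin, Runnable markPhase) {
        // Phase 1: listeners may still update pages, so only a read lock is held.
        lock.readLock().lock();
        try {
            beforeBegin.run();
        } finally {
            lock.readLock().unlock();
        }
        // Phase 2: the write lock freezes page updates while dirty pages are collected.
        lock.writeLock().lock();
        try {
            markPhase.run();
        } finally {
            lock.writeLock().unlock();
        }
        // Page updates resume here; the collected pages are written outside the lock.
    }
}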

Example 19 with DataRegion

use of org.apache.ignite.internal.processors.cache.persistence.DataRegion in project ignite by apache.

the class CheckpointWorkflow method beginAllCheckpoints.

/**
 * @param regions Data regions to collect dirty pages from.
 * @param allowToReplace Future which, once completed, allows the page replacer to replace checkpoint pages.
 * @return Holder of the FullPageIds obtained from each PageMemory together with the overall number of dirty pages.
 */
private CheckpointPagesInfoHolder beginAllCheckpoints(Collection<DataRegion> regions, IgniteInternalFuture<?> allowToReplace) {
    Collection<Map.Entry<PageMemoryEx, GridMultiCollectionWrapper<FullPageId>>> res = new ArrayList<>(regions.size());
    int pagesNum = 0;
    for (DataRegion reg : regions) {
        if (!reg.config().isPersistenceEnabled())
            continue;
        GridMultiCollectionWrapper<FullPageId> nextCpPages = ((PageMemoryEx) reg.pageMemory()).beginCheckpoint(allowToReplace);
        pagesNum += nextCpPages.size();
        res.add(new T2<>((PageMemoryEx) reg.pageMemory(), nextCpPages));
    }
    return new CheckpointPagesInfoHolder(res, pagesNum);
}
Also used : ArrayList(java.util.ArrayList) PageMemoryEx(org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx) FullPageId(org.apache.ignite.internal.pagemem.FullPageId) DataRegion(org.apache.ignite.internal.processors.cache.persistence.DataRegion)
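
The loop above skips non-persistent regions and collects one PageMemoryEx per persistent region. The same filter expressed with the Stream API, as an illustrative equivalent rather than the actual Ignite code:

import java.util.Collection;
import java.util.List;
import java.util.stream.Collectors;

import org.apache.ignite.internal.processors.cache.persistence.DataRegion;
import org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx;

public class PersistentRegionFilter {
    // Returns the page memories of persistence-enabled regions only.
    public static List<PageMemoryEx> persistentMemories(Collection<DataRegion> regions) {
        return regions.stream()
            .filter(reg -> reg.config().isPersistenceEnabled())
            .map(reg -> (PageMemoryEx) reg.pageMemory())
            .collect(Collectors.toList());
    }
}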

Example 20 with DataRegion

use of org.apache.ignite.internal.processors.cache.persistence.DataRegion in project ignite by apache.

the class CheckpointWorkflow method finalizeCheckpointOnRecovery.

/**
 * This method applies when the node was stopped during a checkpoint (the start marker was written to disk
 * but the end marker was not). It writes all collected pages to disk and creates the end marker.
 *
 * @throws IgniteCheckedException If failed.
 */
public void finalizeCheckpointOnRecovery(long cpTs, UUID cpId, WALPointer walPtr, StripedExecutor exec, CheckpointPagesWriterFactory checkpointPagesWriterFactory) throws IgniteCheckedException {
    assert cpTs != 0;
    long start = System.currentTimeMillis();
    Collection<DataRegion> regions = dataRegions.get();
    CheckpointPagesInfoHolder cpPagesHolder = beginAllCheckpoints(regions, new GridFinishedFuture<>());
    // Sort the dirty page set and split it across several stripes.
    GridConcurrentMultiPairQueue<PageMemoryEx, FullPageId> pages = splitAndSortCpPagesIfNeeded(cpPagesHolder);
    // Set of updated page stores to fsync later.
    Collection<PageStore> updStores = new GridConcurrentHashSet<>();
    AtomicInteger cpPagesCnt = new AtomicInteger();
    // Shared reference for tracking exceptions during page writes.
    AtomicReference<Throwable> writePagesError = new AtomicReference<>();
    for (int stripeIdx = 0; stripeIdx < exec.stripesCount(); stripeIdx++) exec.execute(stripeIdx, checkpointPagesWriterFactory.buildRecovery(pages, updStores, writePagesError, cpPagesCnt));
    // Await completion of all write tasks.
    awaitApplyComplete(exec, writePagesError);
    long written = U.currentTimeMillis();
    // Fsync all touched stores.
    for (PageStore updStore : updStores) updStore.sync();
    long fsync = U.currentTimeMillis();
    for (DataRegion memPlc : regions) {
        if (memPlc.config().isPersistenceEnabled())
            ((PageMemoryEx) memPlc.pageMemory()).finishCheckpoint();
    }
    checkpointMarkersStorage.writeCheckpointEntry(cpTs, cpId, walPtr, null, CheckpointEntryType.END, skipSync);
    if (log.isInfoEnabled())
        log.info(String.format("Checkpoint finished [cpId=%s, pages=%d, markPos=%s, " + "pagesWrite=%dms, fsync=%dms, total=%dms]", cpId, cpPagesCnt.get(), walPtr, written - start, fsync - written, fsync - start));
}
Also used : PageStore(org.apache.ignite.internal.pagemem.store.PageStore) AtomicReference(java.util.concurrent.atomic.AtomicReference) GridConcurrentHashSet(org.apache.ignite.internal.util.GridConcurrentHashSet) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) PageMemoryEx(org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx) DataRegion(org.apache.ignite.internal.processors.cache.persistence.DataRegion) FullPageId(org.apache.ignite.internal.pagemem.FullPageId)
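
The write phase above fans one writer task out per executor stripe and funnels failures into a shared AtomicReference before a single await. A generic sketch of that pattern with a plain ExecutorService standing in for Ignite's StripedExecutor; all names are illustrative:

import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.atomic.AtomicReference;

public class StripedWritePattern {
    public static void writeAllStripes(ExecutorService exec, int stripes, Runnable stripeTask)
        throws InterruptedException {
        AtomicReference<Throwable> error = new AtomicReference<>();
        CountDownLatch done = new CountDownLatch(stripes);
        for (int i = 0; i < stripes; i++) {
            exec.execute(() -> {
                try {
                    stripeTask.run();
                } catch (Throwable t) {
                    error.compareAndSet(null, t); // keep only the first failure
                } finally {
                    done.countDown();
                }
            });
        }
        done.await(); // mirrors awaitApplyComplete(exec, writePagesError)
        if (error.get() != null)
            throw new RuntimeException("Page write failed", error.get());
    }
}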

Aggregations

DataRegion (org.apache.ignite.internal.processors.cache.persistence.DataRegion) 29
IgniteCheckedException (org.apache.ignite.IgniteCheckedException) 7
PageMemoryEx (org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMemoryEx) 6
PageMetrics (org.apache.ignite.internal.processors.cache.persistence.pagemem.PageMetrics) 6
GridCommonAbstractTest (org.apache.ignite.testframework.junits.common.GridCommonAbstractTest) 6
Test (org.junit.Test) 6
DataRegionConfiguration (org.apache.ignite.configuration.DataRegionConfiguration) 5
FullPageId (org.apache.ignite.internal.pagemem.FullPageId) 5
GridCacheDatabaseSharedManager (org.apache.ignite.internal.processors.cache.persistence.GridCacheDatabaseSharedManager) 5
File (java.io.File) 4
IgniteConfiguration (org.apache.ignite.configuration.IgniteConfiguration) 4
IgniteEx (org.apache.ignite.internal.IgniteEx) 4
PageMemory (org.apache.ignite.internal.pagemem.PageMemory) 4
DataRegionMetricsImpl (org.apache.ignite.internal.processors.cache.persistence.DataRegionMetricsImpl) 4
ArrayList (java.util.ArrayList) 3
Map (java.util.Map) 3
IgniteCache (org.apache.ignite.IgniteCache) 3
CacheConfiguration (org.apache.ignite.configuration.CacheConfiguration) 3
Collection (java.util.Collection) 2
List (java.util.List) 2