
Example 1 with NoopIndexingSpi

Use of org.apache.ignite.spi.indexing.noop.NoopIndexingSpi in project ignite by apache.

From class ValidationOnNodeJoinUtils, method validate:

/**
 * @param c Ignite configuration.
 * @param cc Configuration to validate.
 * @param cacheType Cache type.
 * @param cfgStore Cache store.
 * @param ctx Context.
 * @param log Logger.
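 * @param assertParam Parameter assertion callback that yields an {@code IgniteCheckedException} when a check fails.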
 * @throws IgniteCheckedException If failed.
 */
static void validate(IgniteConfiguration c, CacheConfiguration cc, CacheType cacheType, @Nullable CacheStore cfgStore, GridKernalContext ctx, IgniteLogger log, BiFunction<Boolean, String, IgniteCheckedException> assertParam) throws IgniteCheckedException {
    apply(assertParam, cc.getName() != null && !cc.getName().isEmpty(), "name is null or empty");
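    // A near cache is redundant on affinity nodes of a REPLICATED cache (data is already local), so the near configuration is dropped with a warning.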
    if (cc.getCacheMode() == REPLICATED) {
        if (cc.getNearConfiguration() != null && ctx.discovery().cacheAffinityNode(ctx.discovery().localNode(), cc.getName())) {
            U.warn(log, "Near cache cannot be used with REPLICATED cache, " + "will be ignored [cacheName=" + U.maskName(cc.getName()) + ']');
            cc.setNearConfiguration(null);
        }
    }
    if (storesLocallyOnClient(c, cc, ctx))
        throw new IgniteCheckedException("DataRegion for client caches must be explicitly configured " + "on client node startup. Use DataStorageConfiguration to configure DataRegion.");
    if (cc.getCacheMode() == LOCAL && !cc.getAffinity().getClass().equals(LocalAffinityFunction.class))
        U.warn(log, "AffinityFunction configuration parameter will be ignored for local cache [cacheName=" + U.maskName(cc.getName()) + ']');
    if (cc.getAffinity().partitions() > CacheConfiguration.MAX_PARTITIONS_COUNT)
        throw new IgniteCheckedException("Cannot have more than " + CacheConfiguration.MAX_PARTITIONS_COUNT + " partitions [cacheName=" + cc.getName() + ", partitions=" + cc.getAffinity().partitions() + ']');
    if (cc.getRebalanceMode() != CacheRebalanceMode.NONE) {
        apply(assertParam, cc.getRebalanceBatchSize() > 0, "rebalanceBatchSize > 0");
        apply(assertParam, cc.getRebalanceTimeout() >= 0, "rebalanceTimeout >= 0");
        apply(assertParam, cc.getRebalanceThrottle() >= 0, "rebalanceThrottle >= 0");
        apply(assertParam, cc.getRebalanceBatchesPrefetchCount() > 0, "rebalanceBatchesPrefetchCount > 0");
    }
    if (cc.getCacheMode() == PARTITIONED || cc.getCacheMode() == REPLICATED) {
        if (cc.getAtomicityMode() == ATOMIC && cc.getWriteSynchronizationMode() == FULL_ASYNC)
            U.warn(log, "Cache write synchronization mode is set to FULL_ASYNC. All single-key 'put' and " + "'remove' operations will return 'null', all 'putx' and 'removex' operations will return" + " 'true' [cacheName=" + U.maskName(cc.getName()) + ']');
    }
    DeploymentMode depMode = c.getDeploymentMode();
    if (c.isPeerClassLoadingEnabled() && (depMode == PRIVATE || depMode == ISOLATED) && !CU.isSystemCache(cc.getName()) && !(c.getMarshaller() instanceof BinaryMarshaller))
        throw new IgniteCheckedException("Cache can be started in PRIVATE or ISOLATED deployment mode only when" + " BinaryMarshaller is used [depMode=" + ctx.config().getDeploymentMode() + ", marshaller=" + c.getMarshaller().getClass().getName() + ']');
    if (cc.getAffinity().partitions() > CacheConfiguration.MAX_PARTITIONS_COUNT)
        throw new IgniteCheckedException("Affinity function must return at most " + CacheConfiguration.MAX_PARTITIONS_COUNT + " partitions [actual=" + cc.getAffinity().partitions() + ", affFunction=" + cc.getAffinity() + ", cacheName=" + cc.getName() + ']');
    if (cc.getAtomicityMode() == TRANSACTIONAL_SNAPSHOT) {
        apply(assertParam, cc.getCacheMode() != LOCAL, "LOCAL cache mode cannot be used with TRANSACTIONAL_SNAPSHOT atomicity mode");
        apply(assertParam, cc.getNearConfiguration() == null, "near cache cannot be used with TRANSACTIONAL_SNAPSHOT atomicity mode");
        apply(assertParam, !cc.isReadThrough(), "readThrough cannot be used with TRANSACTIONAL_SNAPSHOT atomicity mode");
        apply(assertParam, !cc.isWriteThrough(), "writeThrough cannot be used with TRANSACTIONAL_SNAPSHOT atomicity mode");
        apply(assertParam, !cc.isWriteBehindEnabled(), "writeBehindEnabled cannot be used with TRANSACTIONAL_SNAPSHOT atomicity mode");
        apply(assertParam, cc.getRebalanceMode() != NONE, "Rebalance mode NONE cannot be used with TRANSACTIONAL_SNAPSHOT atomicity mode");
        ExpiryPolicy expPlc = null;
        if (cc.getExpiryPolicyFactory() instanceof FactoryBuilder.SingletonFactory)
            expPlc = (ExpiryPolicy) cc.getExpiryPolicyFactory().create();
        if (!(expPlc instanceof EternalExpiryPolicy)) {
            apply(assertParam, cc.getExpiryPolicyFactory() == null, "expiry policy cannot be used with TRANSACTIONAL_SNAPSHOT atomicity mode");
        }
        apply(assertParam, cc.getInterceptor() == null, "interceptor cannot be used with TRANSACTIONAL_SNAPSHOT atomicity mode");
        // Disable in-memory evictions for mvcc cache. TODO IGNITE-10738
        String memPlcName = cc.getDataRegionName();
        DataRegion dataRegion = ctx.cache().context().database().dataRegion(memPlcName);
        if (dataRegion != null && !dataRegion.config().isPersistenceEnabled() && dataRegion.config().getPageEvictionMode() != DataPageEvictionMode.DISABLED) {
            throw new IgniteCheckedException("Data pages evictions cannot be used with TRANSACTIONAL_SNAPSHOT " + "cache atomicity mode for in-memory regions. Please, either disable evictions or enable " + "persistence for data regions with TRANSACTIONAL_SNAPSHOT caches. [cacheName=" + cc.getName() + ", dataRegionName=" + memPlcName + ", pageEvictionMode=" + dataRegion.config().getPageEvictionMode() + ']');
        }
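        // The usage this example highlights: TRANSACTIONAL_SNAPSHOT caches allow only the default NoopIndexingSpi (or no indexing SPI at all).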
        IndexingSpi idxSpi = ctx.config().getIndexingSpi();
        apply(assertParam, idxSpi == null || idxSpi instanceof NoopIndexingSpi, "Custom IndexingSpi cannot be used with TRANSACTIONAL_SNAPSHOT atomicity mode");
    }
    // This method can be called when memory recovery is in progress,
    // which means that the GridDiscovery manager is not started, and therefore localNode is also not initialized.
    ClusterNode locNode = ctx.discovery().localNode() != null ? ctx.discovery().localNode() : new DetachedClusterNode(ctx.pdsFolderResolver().resolveFolders().consistentId(), ctx.nodeAttributes());
    if (cc.isWriteBehindEnabled() && ctx.discovery().cacheAffinityNode(locNode, cc.getName())) {
        if (cfgStore == null)
            throw new IgniteCheckedException("Cannot enable write-behind (writer or store is not provided) " + "for cache: " + U.maskName(cc.getName()));
        apply(assertParam, cc.getWriteBehindBatchSize() > 0, "writeBehindBatchSize > 0");
        apply(assertParam, cc.getWriteBehindFlushSize() >= 0, "writeBehindFlushSize >= 0");
        apply(assertParam, cc.getWriteBehindFlushFrequency() >= 0, "writeBehindFlushFrequency >= 0");
        apply(assertParam, cc.getWriteBehindFlushThreadCount() > 0, "writeBehindFlushThreadCount > 0");
        if (cc.getWriteBehindFlushSize() == 0 && cc.getWriteBehindFlushFrequency() == 0)
            throw new IgniteCheckedException("Cannot set both 'writeBehindFlushFrequency' and " + "'writeBehindFlushSize' parameters to 0 for cache: " + U.maskName(cc.getName()));
    }
    if (cc.isReadThrough() && cfgStore == null && ctx.discovery().cacheAffinityNode(locNode, cc.getName()))
        throw new IgniteCheckedException("Cannot enable read-through (loader or store is not provided) " + "for cache: " + U.maskName(cc.getName()));
    if (cc.isWriteThrough() && cfgStore == null && ctx.discovery().cacheAffinityNode(locNode, cc.getName()))
        throw new IgniteCheckedException("Cannot enable write-through (writer or store is not provided) " + "for cache: " + U.maskName(cc.getName()));
    long delay = cc.getRebalanceDelay();
    if (delay != 0) {
        if (cc.getCacheMode() != PARTITIONED)
            U.warn(log, "Rebalance delay is supported only for partitioned caches (will ignore): " + (cc.getName()));
        else if (cc.getRebalanceMode() == SYNC) {
            if (delay < 0) {
                U.warn(log, "Ignoring SYNC rebalance mode with manual rebalance start (node will not wait for " + "rebalancing to be finished): " + U.maskName(cc.getName()));
            } else {
                U.warn(log, "Using SYNC rebalance mode with rebalance delay (node will wait until rebalancing is " + "initiated for " + delay + "ms) for cache: " + U.maskName(cc.getName()));
            }
        }
    }
    ctx.coordinators().validateCacheConfiguration(cc);
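    // A transaction manager lookup only makes sense for transactional caches.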
    if (cc.getAtomicityMode() == ATOMIC)
        apply(assertParam, cc.getTransactionManagerLookupClassName() == null, "transaction manager can not be used with ATOMIC cache");
    if ((cc.getEvictionPolicyFactory() != null || cc.getEvictionPolicy() != null) && !cc.isOnheapCacheEnabled())
        throw new IgniteCheckedException("Onheap cache must be enabled if eviction policy is configured [cacheName=" + U.maskName(cc.getName()) + "]");
    if (cacheType != CacheType.DATA_STRUCTURES && DataStructuresProcessor.isDataStructureCache(cc.getName()))
        throw new IgniteCheckedException("Using cache names reserved for datastructures is not allowed for " + "other cache types [cacheName=" + cc.getName() + ", cacheType=" + cacheType + "]");
    if (cacheType != CacheType.DATA_STRUCTURES && DataStructuresProcessor.isReservedGroup(cc.getGroupName()))
        throw new IgniteCheckedException("Using cache group names reserved for datastructures is not allowed for " + "other cache types [cacheName=" + cc.getName() + ", groupName=" + cc.getGroupName() + ", cacheType=" + cacheType + "]");
    // Make sure we do not use sql schema for system views.
    if (ctx.query().moduleEnabled()) {
        String schema = QueryUtils.normalizeSchemaName(cc.getName(), cc.getSqlSchema());
        if (F.eq(schema, QueryUtils.SCHEMA_SYS)) {
            if (cc.getSqlSchema() == null) {
                // Conflict on cache name.
                throw new IgniteCheckedException("SQL schema name derived from cache name is reserved (" + "please set explicit SQL schema name through CacheConfiguration.setSqlSchema() or choose " + "another cache name) [cacheName=" + cc.getName() + ", schemaName=" + cc.getSqlSchema() + "]");
            } else {
                // Conflict on schema name.
                throw new IgniteCheckedException("SQL schema name is reserved (please choose another one) [" + "cacheName=" + cc.getName() + ", schemaName=" + cc.getSqlSchema() + ']');
            }
        }
    }
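    // Encrypted caches require persistence and a configured EncryptionSpi, and cannot be combined with disk page compression.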
    if (cc.isEncryptionEnabled() && !ctx.clientNode()) {
        StringBuilder cacheSpec = new StringBuilder("[cacheName=").append(cc.getName()).append(", groupName=").append(cc.getGroupName()).append(", cacheType=").append(cacheType).append(']');
        if (!CU.isPersistentCache(cc, c.getDataStorageConfiguration())) {
            throw new IgniteCheckedException("Using encryption is not allowed" + " for not persistent cache " + cacheSpec.toString());
        }
        EncryptionSpi encSpi = c.getEncryptionSpi();
        if (encSpi == null) {
            throw new IgniteCheckedException("EncryptionSpi should be configured to use encrypted cache " + cacheSpec.toString());
        }
        if (cc.getDiskPageCompression() != DiskPageCompression.DISABLED)
            throw new IgniteCheckedException("Encryption cannot be used with disk page compression " + cacheSpec.toString());
    }
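    // Non-default precision/scale in query entities requires every server node to be at least version 2.7.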
    Collection<QueryEntity> ents = cc.getQueryEntities();
    if (ctx.discovery().discoCache() != null) {
        boolean nonDfltPrecScaleExists = ents.stream().anyMatch(e -> !F.isEmpty(e.getFieldsPrecision()) || !F.isEmpty(e.getFieldsScale()));
        if (nonDfltPrecScaleExists) {
            ClusterNode oldestNode = ctx.discovery().discoCache().oldestServerNode();
            if (PRECISION_SCALE_SINCE_VER.compareTo(oldestNode.version()) > 0) {
                throw new IgniteCheckedException("Non default precision and scale is supported since version 2.7. " + "The node with oldest version [node=" + oldestNode + ']');
            }
        }
    }
}
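
Below is a minimal sketch (not taken from the Ignite sources) of how the assertParam callback passed into validate(...) might be constructed, and how the NoopIndexingSpi check behaves in isolation. The class name ValidateSketch, the callback body, and the assumption that apply(...) throws the exception returned for a failed condition are illustrative only; the snippet assumes ignite-core is on the classpath.

import java.util.function.BiFunction;

import org.apache.ignite.IgniteCheckedException;
import org.apache.ignite.spi.indexing.IndexingSpi;
import org.apache.ignite.spi.indexing.noop.NoopIndexingSpi;

public class ValidateSketch {
    public static void main(String[] args) throws IgniteCheckedException {
        // Hypothetical callback: return the exception to throw when a condition fails, null otherwise.
        BiFunction<Boolean, String, IgniteCheckedException> assertParam = (cond, desc) ->
            cond ? null : new IgniteCheckedException("Parameter invariant violated: " + desc);

        // Stand-in for ctx.config().getIndexingSpi(); NoopIndexingSpi is the default no-op implementation.
        IndexingSpi idxSpi = new NoopIndexingSpi();

        // Mirrors the check from validate(...): only the default no-op SPI (or none at all)
        // is allowed together with TRANSACTIONAL_SNAPSHOT atomicity.
        IgniteCheckedException err = assertParam.apply(
            idxSpi == null || idxSpi instanceof NoopIndexingSpi,
            "Custom IndexingSpi cannot be used with TRANSACTIONAL_SNAPSHOT atomicity mode");

        if (err != null)
            throw err;

        System.out.println("Indexing SPI check passed: " + idxSpi.getClass().getSimpleName());
    }
}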
Also used : ClusterNode(org.apache.ignite.cluster.ClusterNode) DetachedClusterNode(org.apache.ignite.internal.cluster.DetachedClusterNode) NoopIndexingSpi(org.apache.ignite.spi.indexing.noop.NoopIndexingSpi) BinaryMarshaller(org.apache.ignite.internal.binary.BinaryMarshaller) EternalExpiryPolicy(javax.cache.expiry.EternalExpiryPolicy) QueryEntity(org.apache.ignite.cache.QueryEntity) IndexingSpi(org.apache.ignite.spi.indexing.IndexingSpi) IgniteCheckedException(org.apache.ignite.IgniteCheckedException) ExpiryPolicy(javax.cache.expiry.ExpiryPolicy) DeploymentMode(org.apache.ignite.configuration.DeploymentMode) DataRegion(org.apache.ignite.internal.processors.cache.persistence.DataRegion) EncryptionSpi(org.apache.ignite.spi.encryption.EncryptionSpi)

Aggregations

EternalExpiryPolicy (javax.cache.expiry.EternalExpiryPolicy)1 ExpiryPolicy (javax.cache.expiry.ExpiryPolicy)1 IgniteCheckedException (org.apache.ignite.IgniteCheckedException)1 QueryEntity (org.apache.ignite.cache.QueryEntity)1 ClusterNode (org.apache.ignite.cluster.ClusterNode)1 DeploymentMode (org.apache.ignite.configuration.DeploymentMode)1 BinaryMarshaller (org.apache.ignite.internal.binary.BinaryMarshaller)1 DetachedClusterNode (org.apache.ignite.internal.cluster.DetachedClusterNode)1 DataRegion (org.apache.ignite.internal.processors.cache.persistence.DataRegion)1 EncryptionSpi (org.apache.ignite.spi.encryption.EncryptionSpi)1 IndexingSpi (org.apache.ignite.spi.indexing.IndexingSpi)1 NoopIndexingSpi (org.apache.ignite.spi.indexing.noop.NoopIndexingSpi)1