
Example 26 with BucketRegion

use of org.apache.geode.internal.cache.BucketRegion in project geode by apache.

the class MapRangeIndex method doIndexAddition.

protected void doIndexAddition(Object mapKey, Object indexKey, Object value, RegionEntry entry) throws IMQException {
    boolean isPr = this.region instanceof BucketRegion;
    // Get RangeIndex for it or create it if absent
    RangeIndex rg = (RangeIndex) this.mapKeyToValueIndex.get(mapKey);
    if (rg == null) {
        // use previously created MapRangeIndexStatistics
        IndexStatistics stats = this.internalIndexStats;
        PartitionedIndex prIndex = null;
        if (isPr) {
            prIndex = (PartitionedIndex) this.getPRIndex();
            prIndex.incNumMapKeysStats(mapKey);
        }
        rg = new RangeIndex(indexName + "-" + mapKey, region, fromClause, indexedExpression, projectionAttributes, this.originalFromClause, this.originalIndexedExpression, this.canonicalizedDefinitions, stats);
        // We need the evaluator to detect RegionEntry/IndexEntry inconsistencies.
        rg.evaluator = this.evaluator;
        this.mapKeyToValueIndex.put(mapKey, rg);
        if (!isPr) {
            this.internalIndexStats.incNumMapIndexKeys(1);
        }
    }
    this.internalIndexStats.incUpdatesInProgress(1);
    long start = System.nanoTime();
    rg.addMapping(indexKey, value, entry);
    // This call is skipped when addMapping is called from MapRangeIndex
    // rg.internalIndexStats.incNumUpdates();
    this.internalIndexStats.incUpdatesInProgress(-1);
    long elapsed = System.nanoTime() - start;
    this.internalIndexStats.incUpdateTime(elapsed);
    this.entryToMapKeysMap.add(entry, mapKey);
}
Also used : IndexStatistics(org.apache.geode.cache.query.IndexStatistics) BucketRegion(org.apache.geode.internal.cache.BucketRegion)
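
The method above lazily creates one RangeIndex per map key the first time that key is seen. For context, such a map index is normally defined through the public QueryService API; a minimal sketch, assuming a hypothetical /portfolios region whose values expose a Map-valued field named positions:

// Minimal sketch (hypothetical names): creating a map index over the "positions"
// field. Indexing specific map keys ('k1','k2'), or all keys via positions[*],
// yields a MapRangeIndex, whose doIndexAddition above maintains one RangeIndex
// per map key.
static Index createPositionsMapIndex(QueryService queryService) throws Exception {
    return queryService.createIndex(
        "positionsIndex",        // index name (hypothetical)
        "positions['k1','k2']",  // indexed expression over the map keys
        "/portfolios");          // from clause (hypothetical region)
}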

Example 27 with BucketRegion

use of org.apache.geode.internal.cache.BucketRegion in project geode by apache.

the class PartitionedIndex method getBucketIndex.

/**
   * Returns the index for the bucket.
   */
public static AbstractIndex getBucketIndex(PartitionedRegion pr, String indexName, Integer bId) throws QueryInvocationTargetException {
    try {
        pr.checkReadiness();
    } catch (Exception ex) {
        throw new QueryInvocationTargetException(ex.getMessage());
    }
    PartitionedRegionDataStore prds = pr.getDataStore();
    BucketRegion bukRegion = (BucketRegion) prds.getLocalBucketById(bId);
    if (bukRegion == null) {
        throw new BucketMovedException("Bucket not found for the id :" + bId);
    }
    AbstractIndex index = null;
    if (bukRegion.getIndexManager() != null) {
        index = (AbstractIndex) (bukRegion.getIndexManager().getIndex(indexName));
    } else {
        if (pr.getCache().getLogger().fineEnabled()) {
            pr.getCache().getLogger().fine("Index Manager not found for the bucket region " + bukRegion.getFullPath() + " unable to fetch the index " + indexName);
        }
        throw new QueryInvocationTargetException("Index Manager not found, " + " unable to fetch the index " + indexName);
    }
    return index;
}
Also used : BucketRegion(org.apache.geode.internal.cache.BucketRegion) QueryInvocationTargetException(org.apache.geode.cache.query.QueryInvocationTargetException) BucketMovedException(org.apache.geode.internal.cache.execute.BucketMovedException) PartitionedRegionDataStore(org.apache.geode.internal.cache.PartitionedRegionDataStore) QueryException(org.apache.geode.cache.query.QueryException) IndexNameConflictException(org.apache.geode.cache.query.IndexNameConflictException) TypeMismatchException(org.apache.geode.cache.query.TypeMismatchException) IndexExistsException(org.apache.geode.cache.query.IndexExistsException)
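
A minimal caller sketch for getBucketIndex, illustrative only (it assumes getAllLocalBucketIds() on PartitionedRegionDataStore, which other Geode internals use to enumerate locally hosted buckets; the helper itself is hypothetical):

// Illustrative only: count how many locally hosted buckets currently
// carry the named index.
static int countLocalBucketIndexes(PartitionedRegion pr, String indexName) {
    int found = 0;
    for (Integer bucketId : pr.getDataStore().getAllLocalBucketIds()) {
        try {
            // Each hosted bucket carries its own IndexManager and index copy
            if (PartitionedIndex.getBucketIndex(pr, indexName, bucketId) != null) {
                found++;
            }
        } catch (QueryInvocationTargetException e) {
            // bucket moved away or its IndexManager is gone; skip it
        }
    }
    return found;
}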

Example 28 with BucketRegion

use of org.apache.geode.internal.cache.BucketRegion in project geode by apache.

the class HeapLRUCapacityController method createLRUHelper.

@Override
protected EnableLRU createLRUHelper() {
    return new AbstractEnableLRU() {

        /**
         * Indicate what kind of <code>EvictionAlgorithm</code> this helper implements.
         */
        public EvictionAlgorithm getEvictionAlgorithm() {
            return EvictionAlgorithm.LRU_HEAP;
        }

        /**
         * Estimate an entry's size as the per-entry overhead plus the sizes of its key and
         * value; tombstones are free.
         */
        public int entrySize(Object key, Object value) throws IllegalArgumentException {
            if (value == Token.TOMBSTONE) {
                return 0;
            }
            int size = HeapLRUCapacityController.this.getPerEntryOverhead();
            size += sizeof(key);
            size += sizeof(value);
            return size;
        }

        /**
         * In addition to initializing the statistics, create an evictor thread to periodically
         * evict the LRU entry.
         */
        @Override
        public LRUStatistics initStats(Object region, StatisticsFactory sf) {
            setRegionName(region);
            final LRUStatistics stats = new HeapLRUStatistics(sf, getRegionName(), this);
            setStats(stats);
            return stats;
        }

        public StatisticsType getStatisticsType() {
            return statType;
        }

        public String getStatisticsName() {
            return "HeapLRUStatistics";
        }

        public int getLimitStatId() {
            throw new UnsupportedOperationException("Limit not used with this LRU type");
        }

        public int getCountStatId() {
            return statType.nameToId("entryBytes");
        }

        public int getEvictionsStatId() {
            return statType.nameToId("lruEvictions");
        }

        public int getDestroysStatId() {
            return statType.nameToId("lruDestroys");
        }

        public int getDestroysLimitStatId() {
            return statType.nameToId("lruDestroysLimit");
        }

        public int getEvaluationsStatId() {
            return statType.nameToId("lruEvaluations");
        }

        public int getGreedyReturnsStatId() {
            return statType.nameToId("lruGreedyReturns");
        }

        /**
         * Instead of basing the LRU calculation on the number of entries in the region or on
         * their "size" (which turned out to be incorrectly estimated in the general case), we
         * use the amount of memory currently in use. If the memory in use ({@linkplain
         * Runtime#maxMemory max memory} - {@linkplain Runtime#freeMemory free memory}) is
         * greater than the overflow threshold, we evict the LRU entry.
         */
        public boolean mustEvict(LRUStatistics stats, Region region, int delta) {
            final InternalCache cache = (InternalCache) region.getRegionService();
            InternalResourceManager resourceManager = cache.getInternalResourceManager();
            boolean offheap = region.getAttributes().getOffHeap();
            final boolean monitorStateIsEviction = resourceManager.getMemoryMonitor(offheap).getState().isEviction();
            if (region instanceof BucketRegion) {
                return monitorStateIsEviction && ((BucketRegion) region).getSizeForEviction() > 0;
            }
            return monitorStateIsEviction && ((LocalRegion) region).getRegionMap().sizeInVM() > 0;
        }

        @Override
        public boolean lruLimitExceeded(LRUStatistics lruStatistics, DiskRegionView drv) {
            InternalResourceManager resourceManager = drv.getDiskStore().getCache().getInternalResourceManager();
            return resourceManager.getMemoryMonitor(drv.getOffHeap()).getState().isEviction();
        }
    };
}
Also used : InternalCache(org.apache.geode.internal.cache.InternalCache) InternalResourceManager(org.apache.geode.internal.cache.control.InternalResourceManager) DiskRegionView(org.apache.geode.internal.cache.persistence.DiskRegionView) BucketRegion(org.apache.geode.internal.cache.BucketRegion) LocalRegion(org.apache.geode.internal.cache.LocalRegion) Region(org.apache.geode.cache.Region) StatisticsFactory(org.apache.geode.StatisticsFactory)
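
The mustEvict javadoc above reduces the heap test to "used memory above an eviction threshold". A standalone sketch of that arithmetic, mirroring the javadoc's formula rather than Geode's actual HeapMemoryMonitor logic (the 75% threshold is a hypothetical placeholder):

static boolean heapEvictionNeeded() {
    Runtime rt = Runtime.getRuntime();
    // Per the javadoc: memory in use = max memory - free memory
    long usedBytes = rt.maxMemory() - rt.freeMemory();
    long thresholdBytes = (long) (rt.maxMemory() * 0.75); // hypothetical threshold
    return usedBytes > thresholdBytes;
}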

Example 29 with BucketRegion

use of org.apache.geode.internal.cache.BucketRegion in project geode by apache.

the class QueryDataFunction method selectWithType.

private QueryDataFunctionResult selectWithType(final FunctionContext context, String queryString, final boolean showMember, final String regionName, final int limit, final int queryResultSetLimit, final int queryCollectionsDepth) throws Exception {
    InternalCache cache = getCache();
    Function localQueryFunc = new LocalQueryFunction("LocalQueryFunction", regionName, showMember).setOptimizeForWrite(true);
    queryString = applyLimitClause(queryString, limit, queryResultSetLimit);
    try {
        TypedJson result = new TypedJson(queryCollectionsDepth);
        Region region = cache.getRegion(regionName);
        if (region == null) {
            throw new Exception(ManagementStrings.QUERY__MSG__REGIONS_NOT_FOUND_ON_MEMBER.toLocalizedString(regionName, cache.getDistributedSystem().getDistributedMember().getId()));
        }
        Object results = null;
        boolean noDataFound = true;
        if (region.getAttributes().getDataPolicy() == DataPolicy.NORMAL) {
            QueryService queryService = cache.getQueryService();
            Query query = queryService.newQuery(queryString);
            results = query.execute();
        } else {
            ResultCollector rcollector;
            PartitionedRegion parRegion = PartitionedRegionHelper.getPartitionedRegion(regionName, cache);
            if (parRegion != null && showMember) {
                if (parRegion.isDataStore()) {
                    Set<BucketRegion> localPrimaryBucketRegions = parRegion.getDataStore().getAllLocalPrimaryBucketRegions();
                    Set<Integer> localPrimaryBucketSet = new HashSet<>();
                    for (BucketRegion bRegion : localPrimaryBucketRegions) {
                        localPrimaryBucketSet.add(bRegion.getId());
                    }
                    LocalDataSet lds = new LocalDataSet(parRegion, localPrimaryBucketSet);
                    DefaultQuery query = (DefaultQuery) cache.getQueryService().newQuery(queryString);
                    results = (SelectResults) lds.executeQuery(query, null, localPrimaryBucketSet);
                }
            } else {
                rcollector = FunctionService.onRegion(cache.getRegion(regionName)).setArguments(queryString).execute(localQueryFunc);
                results = rcollector.getResult();
            }
        }
        if (results != null && results instanceof SelectResults) {
            SelectResults selectResults = (SelectResults) results;
            for (Object object : selectResults) {
                result.add(RESULT_KEY, object);
                noDataFound = false;
            }
        } else if (results != null && results instanceof ArrayList) {
            ArrayList listResults = (ArrayList) results;
            ArrayList actualResult = (ArrayList) listResults.get(0);
            for (Object object : actualResult) {
                result.add(RESULT_KEY, object);
                noDataFound = false;
            }
        }
        if (!noDataFound && showMember) {
            result.add(MEMBER_KEY, cache.getDistributedSystem().getDistributedMember().getId());
        }
        if (noDataFound) {
            return new QueryDataFunctionResult(QUERY_EXEC_SUCCESS, BeanUtilFuncs.compress(new JsonisedErrorMessage(NO_DATA_FOUND).toString()));
        }
        return new QueryDataFunctionResult(QUERY_EXEC_SUCCESS, BeanUtilFuncs.compress(result.toString()));
    } catch (Exception e) {
        logger.warn(e.getMessage(), e);
        throw e;
    }
}
Also used : DefaultQuery(org.apache.geode.cache.query.internal.DefaultQuery) Query(org.apache.geode.cache.query.Query) TypedJson(org.apache.geode.management.internal.cli.json.TypedJson) ArrayList(java.util.ArrayList) InternalCache(org.apache.geode.internal.cache.InternalCache) GfJsonException(org.apache.geode.management.internal.cli.json.GfJsonException) FunctionException(org.apache.geode.cache.execute.FunctionException) QueryInvalidException(org.apache.geode.cache.query.QueryInvalidException) Function(org.apache.geode.cache.execute.Function) SelectResults(org.apache.geode.cache.query.SelectResults) BucketRegion(org.apache.geode.internal.cache.BucketRegion) QueryService(org.apache.geode.cache.query.QueryService) LocalDataSet(org.apache.geode.internal.cache.LocalDataSet) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) GfJsonObject(org.apache.geode.management.internal.cli.json.GfJsonObject) ResultCollector(org.apache.geode.cache.execute.ResultCollector) HashSet(java.util.HashSet)
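
The data-store branch above boils down to a query-the-local-primary-buckets pattern. Distilled into a helper for readability, using the same internal classes the example itself uses (a sketch, not a public API):

// Distilled from the example above: run an OQL query against only the
// primary buckets hosted by this member.
static SelectResults<?> queryLocalPrimaries(InternalCache cache, PartitionedRegion pr,
        String oql) throws Exception {
    // Collect the ids of the primary buckets hosted by this member
    Set<Integer> primaryBucketIds = new HashSet<>();
    for (BucketRegion bucket : pr.getDataStore().getAllLocalPrimaryBucketRegions()) {
        primaryBucketIds.add(bucket.getId());
    }
    // Restrict query execution to exactly those buckets
    LocalDataSet localData = new LocalDataSet(pr, primaryBucketIds);
    DefaultQuery query = (DefaultQuery) cache.getQueryService().newQuery(oql);
    return (SelectResults<?>) localData.executeQuery(query, null, primaryBucketIds);
}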

Example 30 with BucketRegion

use of org.apache.geode.internal.cache.BucketRegion in project geode by apache.

the class Bug38741DUnitTest method testPartitionedRegionAndCopyOnRead.

/**
   * Test to ensure that a PartitionedRegion doesn't make more than the expected number of copies
   * when copy-on-read is set to true.
   */
@Test
public void testPartitionedRegionAndCopyOnRead() throws Exception {
    final Host h = Host.getHost(0);
    final VM accessor = h.getVM(2);
    final VM datastore = h.getVM(3);
    final String rName = getUniqueName();
    final String k1 = "k1";
    datastore.invoke(new CacheSerializableRunnable("Create PR DataStore") {

        public void run2() throws CacheException {
            AttributesFactory factory = new AttributesFactory();
            factory.setPartitionAttributes(new PartitionAttributesFactory().setRedundantCopies(0).create());
            createRootRegion(rName, factory.create());
        }
    });
    accessor.invoke(new CacheSerializableRunnable("Create PR Accessor and put new value") {

        public void run2() throws CacheException {
            AttributesFactory factory = new AttributesFactory();
            factory.setPartitionAttributes(new PartitionAttributesFactory().setLocalMaxMemory(0).setRedundantCopies(0).create());
            Region r = createRootRegion(rName, factory.create());
            SerializationCountingValue val = new SerializationCountingValue();
            r.put(k1, val);
            // First put to a bucket will serialize once to determine the size of the value
            // to know how much extra space the new bucket with the new entry will consume
            // and serialize again to send the bytes
            assertEquals(2, val.count.get());
            // A put to an already created bucket should only be serialized once
            val = new SerializationCountingValue();
            r.put(k1, val);
            assertEquals(1, val.count.get());
        }
    });
    datastore.invoke(new CacheSerializableRunnable("assert datastore entry serialization count") {

        public void run2() throws CacheException {
            PartitionedRegion pr = (PartitionedRegion) getRootRegion(rName);
            // Visit the one bucket (since there is only one value in the entire PR)
            // to directly copy the entry bytes and assert the serialization count.
            // All this extra work is to assure the serialization count does not increase
            // (by de-serializing the value stored in the map, which would then have to be
            // re-serialized).
            pr.getDataStore().visitBuckets(new BucketVisitor() {

                public void visit(Integer bucketId, Region r) {
                    BucketRegion br = (BucketRegion) r;
                    try {
                        KeyInfo keyInfo = new KeyInfo(k1, null, bucketId);
                        RawValue rv = br.getSerialized(keyInfo, false, false, null, null, false);
                        Object val = rv.getRawValue();
                        assertTrue(val instanceof CachedDeserializable);
                        CachedDeserializable cd = (CachedDeserializable) val;
                        SerializationCountingValue scv = (SerializationCountingValue) cd.getDeserializedForReading();
                        assertEquals(1, scv.count.get());
                    } catch (IOException fail) {
                        Assert.fail("Unexpected IOException", fail);
                    }
                }
            });
        }
    });
    accessor.invoke(new CacheSerializableRunnable("assert accessor entry serialization count") {

        public void run2() throws CacheException {
            Region r = getRootRegion(rName);
            SerializationCountingValue v1 = (SerializationCountingValue) r.get(k1);
            // The counter was incremented once to send the data to the datastore
            assertEquals(1, v1.count.get());
            getCache().setCopyOnRead(true);
            // Once to send the data to the datastore, no need to do a serialization
            // when we make copy since it is serialized from datastore to us.
            SerializationCountingValue v2 = (SerializationCountingValue) r.get(k1);
            assertEquals(1, v2.count.get());
            assertTrue(v1 != v2);
        }
    });
    datastore.invoke(new CacheSerializableRunnable("assert value serialization") {

        public void run2() throws CacheException {
            Region r = getRootRegion(rName);
            SerializationCountingValue v1 = (SerializationCountingValue) r.get(k1);
            // Once to send the value from the accessor to the data store
            assertEquals(1, v1.count.get());
            getCache().setCopyOnRead(true);
            // Once to send the value from the accessor to the data store
            // once to make a local copy
            SerializationCountingValue v2 = (SerializationCountingValue) r.get(k1);
            assertEquals(2, v2.count.get());
            assertTrue(v1 != v2);
        }
    });
}
Also used : CachedDeserializable(org.apache.geode.internal.cache.CachedDeserializable) CacheException(org.apache.geode.cache.CacheException) Host(org.apache.geode.test.dunit.Host) BucketVisitor(org.apache.geode.internal.cache.PartitionedRegionDataStore.BucketVisitor) IOException(java.io.IOException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) BucketRegion(org.apache.geode.internal.cache.BucketRegion) KeyInfo(org.apache.geode.internal.cache.KeyInfo) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) VM(org.apache.geode.test.dunit.VM) LocalRegion(org.apache.geode.internal.cache.LocalRegion) Region(org.apache.geode.cache.Region) RawValue(org.apache.geode.internal.cache.BucketRegion.RawValue) ClientServerTest(org.apache.geode.test.junit.categories.ClientServerTest) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
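
SerializationCountingValue is a test helper that this excerpt references but does not show. A plausible minimal shape, hypothetical rather than the actual class from Bug38741DUnitTest, to make the count assertions readable:

// Hypothetical reconstruction: a value that counts its own serializations,
// letting the test assert exactly how many copies a put/get produced.
public static class SerializationCountingValue implements DataSerializable {
    public final AtomicInteger count = new AtomicInteger();

    public void toData(DataOutput out) throws IOException {
        out.writeInt(count.incrementAndGet()); // bump on every serialization
    }

    public void fromData(DataInput in) throws IOException {
        count.set(in.readInt()); // carry the count across the wire
    }
}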

Aggregations

BucketRegion (org.apache.geode.internal.cache.BucketRegion): 55 usages
PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion): 35 usages
Region (org.apache.geode.cache.Region): 13 usages
Test (org.junit.Test): 13 usages
Bucket (org.apache.geode.internal.cache.partitioned.Bucket): 11 usages
SerializableCallable (org.apache.geode.test.dunit.SerializableCallable): 11 usages
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 11 usages
InternalDistributedMember (org.apache.geode.distributed.internal.membership.InternalDistributedMember): 10 usages
ArrayList (java.util.ArrayList): 9 usages
SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable): 9 usages
HashMap (java.util.HashMap): 7 usages
LocalRegion (org.apache.geode.internal.cache.LocalRegion): 7 usages
PartitionedRegionDataStore (org.apache.geode.internal.cache.PartitionedRegionDataStore): 7 usages
FlakyTest (org.apache.geode.test.junit.categories.FlakyTest): 6 usages
HashSet (java.util.HashSet): 5 usages
GatewaySender (org.apache.geode.cache.wan.GatewaySender): 5 usages
Map (java.util.Map): 4 usages
AttributesFactory (org.apache.geode.cache.AttributesFactory): 4 usages
PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory): 4 usages
RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException): 4 usages