Search in sources :

Example 51 with BucketRegion

use of org.apache.geode.internal.cache.BucketRegion in project geode by apache.

The class AbstractPartitionedRepositoryManager, method getRepositories:

/**
 * Collects the Lucene index repositories backing every bucket that is locally
 * present for the region identified by the given function context.
 *
 * @param ctx function context whose data set identifies the partitioned region
 * @return one {@link IndexRepository} per locally-held bucket
 * @throws BucketNotFoundException if a bucket id reported as local has no
 *         backing {@link BucketRegion} in the data store (e.g. the bucket
 *         moved between the lookup of the bucket set and the lookup here)
 */
@Override
public Collection<IndexRepository> getRepositories(RegionFunctionContext ctx) throws BucketNotFoundException {
    Region<Object, Object> region = ctx.getDataSet();
    Set<Integer> buckets = ((InternalRegionFunctionContext) ctx).getLocalBucketSet(region);
    // Presize to the bucket count; one repository is added per bucket.
    ArrayList<IndexRepository> repos = new ArrayList<IndexRepository>(buckets.size());
    for (Integer bucketId : buckets) {
        BucketRegion userBucket = userRegion.getDataStore().getLocalBucketById(bucketId);
        if (userBucket == null) {
            // Fixed message: a separating space was missing before "bucket id",
            // which produced output like "...region /rbucket id 3".
            throw new BucketNotFoundException(
                "User bucket was not found for region " + region + " bucket id " + bucketId);
        } else {
            repos.add(getRepository(userBucket.getId()));
        }
    }
    return repos;
}
Also used : IndexRepository(org.apache.geode.cache.lucene.internal.repository.IndexRepository) BucketRegion(org.apache.geode.internal.cache.BucketRegion) ArrayList(java.util.ArrayList) InternalRegionFunctionContext(org.apache.geode.internal.cache.execute.InternalRegionFunctionContext) BucketNotFoundException(org.apache.geode.internal.cache.BucketNotFoundException)

Example 52 with BucketRegion

use of org.apache.geode.internal.cache.BucketRegion in project geode by apache.

The class IndexRepositoryFactory, method computeIndexRepository:

/**
 * Computes (or reuses) the {@link IndexRepository} for a single bucket of a
 * Lucene-indexed partitioned region.
 *
 * <p>Returns {@code null} (after cleaning up any old repository) when this
 * member does not host the file-and-chunk bucket or is not its primary.
 * Reuses {@code oldRepository} when it is still open; otherwise creates a
 * fresh repository under a distributed lock named after the bucket.
 *
 * @param bucketId      id of the bucket to build the repository for
 * @param serializer    serializer used to write documents into the index
 * @param index         the Lucene index (must be a LuceneIndexForPartitionedRegion)
 * @param userRegion    the user's partitioned data region
 * @param oldRepository previously created repository for this bucket, may be null
 * @return the repository for the bucket, or {@code null} if this member is not primary
 * @throws IOException if the Lucene IndexWriter cannot be constructed
 */
public IndexRepository computeIndexRepository(final Integer bucketId, LuceneSerializer serializer, LuceneIndexImpl index, PartitionedRegion userRegion, final IndexRepository oldRepository) throws IOException {
    LuceneIndexForPartitionedRegion indexForPR = (LuceneIndexForPartitionedRegion) index;
    final PartitionedRegion fileRegion = indexForPR.getFileAndChunkRegion();
    BucketRegion fileAndChunkBucket = getMatchingBucket(fileRegion, bucketId);
    BucketRegion dataBucket = getMatchingBucket(userRegion, bucketId);
    boolean success = false;
    // Not hosting the file bucket locally: nothing to build here.
    if (fileAndChunkBucket == null) {
        if (oldRepository != null) {
            oldRepository.cleanup();
        }
        return null;
    }
    // Only the primary for the file bucket may own the writer.
    if (!fileAndChunkBucket.getBucketAdvisor().isPrimary()) {
        if (oldRepository != null) {
            oldRepository.cleanup();
        }
        return null;
    }
    // Still-open repository from a previous call: reuse as-is.
    if (oldRepository != null && !oldRepository.isClosed()) {
        return oldRepository;
    }
    if (oldRepository != null) {
        oldRepository.cleanup();
    }
    DistributedLockService lockService = getLockService();
    String lockName = getLockName(fileAndChunkBucket);
    // Retry the lock in 100ms slices so we can bail out promptly if we
    // lose primary status while waiting.
    while (!lockService.lock(lockName, 100, -1)) {
        if (!fileAndChunkBucket.getBucketAdvisor().isPrimary()) {
            return null;
        }
    }
    final IndexRepository repo;
    try {
        RegionDirectory dir = new RegionDirectory(getBucketTargetingMap(fileAndChunkBucket, bucketId), indexForPR.getFileSystemStats());
        IndexWriterConfig config = new IndexWriterConfig(indexForPR.getAnalyzer());
        IndexWriter writer = new IndexWriter(dir, config);
        repo = new IndexRepositoryImpl(fileAndChunkBucket, writer, serializer, indexForPR.getIndexStats(), dataBucket, lockService, lockName);
        success = true;
        return repo;
    } catch (IOException e) {
        // Pass the exception to the logger so the stack trace is preserved
        // (previously only the message was logged before rethrowing).
        logger.info("Exception thrown while constructing Lucene Index for bucket:" + bucketId + " for file region:" + fileAndChunkBucket.getFullPath(), e);
        throw e;
    } finally {
        // On failure the repository never took ownership of the lock, so
        // release it here; on success the repository releases it on close.
        if (!success) {
            lockService.unlock(lockName);
        }
    }
}
Also used : IndexRepository(org.apache.geode.cache.lucene.internal.repository.IndexRepository) BucketRegion(org.apache.geode.internal.cache.BucketRegion) IndexWriter(org.apache.lucene.index.IndexWriter) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) DistributedLockService(org.apache.geode.distributed.DistributedLockService) IOException(java.io.IOException) RegionDirectory(org.apache.geode.cache.lucene.internal.directory.RegionDirectory) IndexWriterConfig(org.apache.lucene.index.IndexWriterConfig) IndexRepositoryImpl(org.apache.geode.cache.lucene.internal.repository.IndexRepositoryImpl)

Example 53 with BucketRegion

use of org.apache.geode.internal.cache.BucketRegion in project geode by apache.

The class DistributedTransactionDUnitTest, method testConcurrentTXAndNonTXOperations:

/*
   * Test to reproduce a scenario where: 1. On primary, the tx op is applied first followed by
   * non-tx 2. On secondary, non-tx op is applied first followed by tx.
   */
@Ignore
@Test
public void testConcurrentTXAndNonTXOperations() throws Exception {
    Host host = Host.getHost(0);
    final VM server1 = host.getVM(0);
    final VM server2 = host.getVM(1);
    createPersistentPR(new VM[] { server1 });
    // Seed the entry on server1 while it is the only member, so server1 is
    // guaranteed to host the bucket.
    execute(server1, new SerializableCallable() {

        @Override
        public Object call() throws Exception {
            Region<CustId, Customer> prRegion = getCache().getRegion(PERSISTENT_CUSTOMER_PR);
            CustId custIdOne = new CustId(1);
            Customer customerOne = new Customer("name1", "addr1");
            prRegion.put(custIdOne, customerOne);
            BucketRegion br = ((PartitionedRegion) prRegion).getBucketRegion(custIdOne);
            String primaryMember = br.getBucketAdvisor().getPrimary().toString();
            getGemfireCache().getLoggerI18n().fine("TEST:PRIMARY:" + primaryMember);
            String memberId = getGemfireCache().getDistributedSystem().getMemberId();
            getGemfireCache().getLoggerI18n().fine("TEST:MEMBERID:" + memberId);
            return null;
        }
    });
    createPersistentPR(new VM[] { server2 });
    // Determine which VM ended up primary for the bucket after server2 joined.
    Boolean isPrimary = (Boolean) execute(server1, new SerializableCallable() {

        @Override
        public Object call() throws Exception {
            Region<CustId, Customer> prRegion = getCache().getRegion(PERSISTENT_CUSTOMER_PR);
            CustId custIdOne = new CustId(1);
            BucketRegion br = ((PartitionedRegion) prRegion).getBucketRegion(custIdOne);
            String primaryMember = br.getBucketAdvisor().getPrimary().toString();
            getGemfireCache().getLoggerI18n().fine("TEST:PRIMARY:" + primaryMember);
            String memberId = getGemfireCache().getDistributedSystem().getMemberId();
            getGemfireCache().getLoggerI18n().fine("TEST:MEMBERID:" + memberId);
            return memberId.equals(primaryMember);
        }
    });
    final VM primary = isPrimary.booleanValue() ? server1 : server2;
    final VM secondary = !isPrimary.booleanValue() ? server1 : server2;
    System.out.println("TEST:SERVER-1:VM-" + server1.getPid());
    System.out.println("TEST:SERVER-2:VM-" + server2.getPid());
    System.out.println("TEST:PRIMARY=VM-" + primary.getPid());
    System.out.println("TEST:SECONDARY=VM-" + secondary.getPid());
    // Hook that blocks the hooked operation until release() is called.
    class WaitRelease implements Runnable {

        CountDownLatch cdl;

        String op;

        public WaitRelease(CountDownLatch cdl, String member) {
            this.cdl = cdl;
            // Fix: the op label was never assigned, so all log lines printed "null".
            this.op = member;
        }

        @Override
        public void run() {
            try {
                GemFireCacheImpl.getExisting().getLoggerI18n().fine("TEST:TX WAITING - " + op);
                cdl.await();
                GemFireCacheImpl.getExisting().getLoggerI18n().fine("TEST:TX END WAITING");
            } catch (InterruptedException e) {
                // Restore the interrupt status instead of swallowing it silently.
                Thread.currentThread().interrupt();
            }
        }

        public void release() {
            GemFireCacheImpl.getExisting().getLoggerI18n().fine("TEST:TX COUNTDOWN - " + op);
            cdl.countDown();
        }
    }
    // Install TX hook
    SerializableCallable txHook = new SerializableCallable() {

        @Override
        public Object call() throws Exception {
            CountDownLatch cdl = new CountDownLatch(1);
            DistTXState.internalBeforeApplyChanges = new WaitRelease(cdl, "TX OP");
            return null;
        }
    };
    execute(secondary, txHook);
    // Install non-TX hook
    SerializableCallable nontxHook = new SerializableCallable() {

        @Override
        public Object call() throws Exception {
            CountDownLatch cdl = new CountDownLatch(1);
            DistTXState.internalBeforeNonTXBasicPut = new WaitRelease(cdl, "NON TX OP");
            return null;
        }
    };
    // Install the wait-release hook on the secondary
    execute(secondary, nontxHook);
    // Start a tx operation on primary
    execute(primary, new SerializableCallable() {

        @Override
        public Object call() throws Exception {
            // The reason this is run in a separate thread instead of controller thread
            // is that this is going to block because the secondary is going to wait.
            new Thread() {

                public void run() {
                    CacheTransactionManager mgr = getGemfireCache().getTxManager();
                    mgr.setDistributed(true);
                    getGemfireCache().getLoggerI18n().fine("TEST:DISTTX=" + mgr.isDistributed());
                    mgr.begin();
                    Region<CustId, Customer> prRegion = getCache().getRegion(PERSISTENT_CUSTOMER_PR);
                    CustId custIdOne = new CustId(1);
                    Customer customerOne = new Customer("name1_tx", "addr1");
                    getGemfireCache().getLoggerI18n().fine("TEST:TX UPDATE");
                    prRegion.put(custIdOne, customerOne);
                    getGemfireCache().getLoggerI18n().fine("TEST:TX COMMIT");
                    mgr.commit();
                }
            }.start();
            return null;
        }
    });
    // Let the TX op be applied on primary first.
    // (Thread.sleep is static; calling it via Thread.currentThread() was misleading.)
    Thread.sleep(200);
    // Perform a non-tx op on the same key on primary
    execute(primary, new SerializableCallable() {

        @Override
        public Object call() throws Exception {
            Region<CustId, Customer> prRegion = getCache().getRegion(PERSISTENT_CUSTOMER_PR);
            CustId custIdOne = new CustId(1);
            Customer customerOne = new Customer("name1_nontx", "addr1");
            getGemfireCache().getLoggerI18n().fine("TEST:TX NONTXUPDATE");
            prRegion.put(custIdOne, customerOne);
            return null;
        }
    });
    // Wait for a few milliseconds
    Thread.sleep(200);
    // Release the waiting non-tx op first, on secondary
    execute(secondary, new SerializableCallable() {

        @Override
        public Object call() throws Exception {
            Runnable r = DistTXState.internalBeforeNonTXBasicPut;
            assert (r != null && r instanceof WaitRelease);
            WaitRelease e = (WaitRelease) r;
            e.release();
            return null;
        }
    });
    // Now release the waiting commit on secondary
    execute(secondary, new SerializableCallable() {

        @Override
        public Object call() throws Exception {
            Runnable r = DistTXState.internalBeforeApplyChanges;
            assert (r != null && r instanceof WaitRelease);
            WaitRelease e = (WaitRelease) r;
            e.release();
            return null;
        }
    });
    // Verify region and entry versions on primary and secondary
    SerializableCallable verifyPrimary = new SerializableCallable() {

        @Override
        public Object call() throws Exception {
            Region<CustId, Customer> prRegion = getCache().getRegion(PERSISTENT_CUSTOMER_PR);
            CustId custId = new CustId(1);
            Customer customer = prRegion.get(custId);
            BucketRegion br = ((PartitionedRegion) prRegion).getBucketRegion(custId);
            RegionEntry re = br.getRegionEntry(custId);
            getGemfireCache().getLoggerI18n().fine("TEST:TX PRIMARY CUSTOMER=" + customer);
            getGemfireCache().getLoggerI18n().fine("TEST:TX PRIMARY REGION VERSION=" + re.getVersionStamp().getRegionVersion());
            getGemfireCache().getLoggerI18n().fine("TEST:TX PRIMARY ENTRY VERSION=" + re.getVersionStamp().getEntryVersion());
            return null;
        }
    };
    execute(primary, verifyPrimary);
    SerializableCallable verifySecondary = new SerializableCallable() {

        @Override
        public Object call() throws Exception {
            Region<CustId, Customer> prRegion = getCache().getRegion(PERSISTENT_CUSTOMER_PR);
            CustId custId = new CustId(1);
            Customer customer = prRegion.get(custId);
            BucketRegion br = ((PartitionedRegion) prRegion).getBucketRegion(custId);
            RegionEntry re = br.getRegionEntry(custId);
            getGemfireCache().getLoggerI18n().fine("TEST:TX SECONDARY CUSTOMER=" + customer);
            getGemfireCache().getLoggerI18n().fine("TEST:TX SECONDARY REGION VERSION=" + re.getVersionStamp().getRegionVersion());
            getGemfireCache().getLoggerI18n().fine("TEST:TX SECONDARY ENTRY VERSION=" + re.getVersionStamp().getEntryVersion());
            return null;
        }
    };
    execute(secondary, verifySecondary);
}
Also used : Customer(org.apache.geode.internal.cache.execute.data.Customer) Host(org.apache.geode.test.dunit.Host) CountDownLatch(java.util.concurrent.CountDownLatch) CommitConflictException(org.apache.geode.cache.CommitConflictException) CommitIncompleteException(org.apache.geode.cache.CommitIncompleteException) CacheTransactionManager(org.apache.geode.cache.CacheTransactionManager) BucketRegion(org.apache.geode.internal.cache.BucketRegion) CustId(org.apache.geode.internal.cache.execute.data.CustId) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) VM(org.apache.geode.test.dunit.VM) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) LocalRegion(org.apache.geode.internal.cache.LocalRegion) BucketRegion(org.apache.geode.internal.cache.BucketRegion) Region(org.apache.geode.cache.Region) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) RegionEntry(org.apache.geode.internal.cache.RegionEntry) Ignore(org.junit.Ignore) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)

Example 54 with BucketRegion

use of org.apache.geode.internal.cache.BucketRegion in project geode by apache.

The class WANTestBase, method validateParallelSenderQueueAllBucketsDrained:

/**
 * Waits (up to 180s per bucket) for every local primary bucket of the parallel
 * gateway sender's queue region to drain to zero entries.
 *
 * <p>RegionDestroyedException and ForceReattemptException are expected during
 * rebalancing/drain and are registered as ignored for the duration of the check.
 *
 * @param senderId id of the gateway sender whose queue is validated
 * @throws IllegalStateException if no gateway sender with the given id exists
 *         (previously this surfaced as an uninformative NullPointerException)
 */
public static void validateParallelSenderQueueAllBucketsDrained(final String senderId) {
    IgnoredException exp = IgnoredException.addIgnoredException(RegionDestroyedException.class.getName());
    IgnoredException exp1 = IgnoredException.addIgnoredException(ForceReattemptException.class.getName());
    try {
        Set<GatewaySender> senders = cache.getGatewaySenders();
        GatewaySender sender = null;
        for (GatewaySender s : senders) {
            if (s.getId().equals(senderId)) {
                sender = s;
                break;
            }
        }
        // Fail fast with a clear message instead of an NPE on the cast below.
        if (sender == null) {
            throw new IllegalStateException("No gateway sender found with id: " + senderId);
        }
        RegionQueue regionQueue = ((AbstractGatewaySender) sender).getQueues().toArray(new RegionQueue[1])[0];
        Set<BucketRegion> buckets = ((PartitionedRegion) regionQueue.getRegion()).getDataStore().getAllLocalPrimaryBucketRegions();
        for (final BucketRegion bucket : buckets) {
            Awaitility.await().atMost(180, TimeUnit.SECONDS).until(() -> {
                assertEquals("Expected bucket entries for bucket: " + bucket.getId() + " is: 0 but actual entries: " + bucket.keySet().size() + " This bucket isPrimary: " + bucket.getBucketAdvisor().isPrimary() + " KEYSET: " + bucket.keySet(), 0, bucket.keySet().size());
            });
        }
    // for loop ends
    } finally {
        exp.remove();
        exp1.remove();
    }
}
Also used : GatewaySender(org.apache.geode.cache.wan.GatewaySender) ForceReattemptException(org.apache.geode.internal.cache.ForceReattemptException) BucketRegion(org.apache.geode.internal.cache.BucketRegion) RegionDestroyedException(org.apache.geode.cache.RegionDestroyedException) IgnoredException(org.apache.geode.test.dunit.IgnoredException) RegionQueue(org.apache.geode.internal.cache.RegionQueue)

Example 55 with BucketRegion

use of org.apache.geode.internal.cache.BucketRegion in project geode by apache.

The class WANTestBase, method validateParallelSenderQueueBucketSize:

/**
 * Asserts that every local primary bucket of the parallel gateway sender's
 * queue region currently holds exactly {@code bucketSize} entries.
 *
 * @param senderId   id of the gateway sender whose queue is validated
 * @param bucketSize expected entry count for each primary bucket
 * @throws IllegalStateException if no gateway sender with the given id exists
 *         (previously this surfaced as an uninformative NullPointerException)
 */
public static void validateParallelSenderQueueBucketSize(final String senderId, final int bucketSize) {
    Set<GatewaySender> senders = cache.getGatewaySenders();
    GatewaySender sender = null;
    for (GatewaySender s : senders) {
        if (s.getId().equals(senderId)) {
            sender = s;
            break;
        }
    }
    // Fail fast with a clear message instead of an NPE on the cast below.
    if (sender == null) {
        throw new IllegalStateException("No gateway sender found with id: " + senderId);
    }
    RegionQueue regionQueue = ((AbstractGatewaySender) sender).getQueues().toArray(new RegionQueue[1])[0];
    Set<BucketRegion> buckets = ((PartitionedRegion) regionQueue.getRegion()).getDataStore().getAllLocalPrimaryBucketRegions();
    for (BucketRegion bucket : buckets) {
        assertEquals("Expected bucket entries for bucket " + bucket.getId() + " is different than actual.", bucketSize, bucket.keySet().size());
    }
}
Also used : GatewaySender(org.apache.geode.cache.wan.GatewaySender) BucketRegion(org.apache.geode.internal.cache.BucketRegion) RegionQueue(org.apache.geode.internal.cache.RegionQueue)

Aggregations

BucketRegion (org.apache.geode.internal.cache.BucketRegion)55 PartitionedRegion (org.apache.geode.internal.cache.PartitionedRegion)35 Region (org.apache.geode.cache.Region)13 Test (org.junit.Test)13 Bucket (org.apache.geode.internal.cache.partitioned.Bucket)11 SerializableCallable (org.apache.geode.test.dunit.SerializableCallable)11 DistributedTest (org.apache.geode.test.junit.categories.DistributedTest)11 InternalDistributedMember (org.apache.geode.distributed.internal.membership.InternalDistributedMember)10 ArrayList (java.util.ArrayList)9 SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable)9 HashMap (java.util.HashMap)7 LocalRegion (org.apache.geode.internal.cache.LocalRegion)7 PartitionedRegionDataStore (org.apache.geode.internal.cache.PartitionedRegionDataStore)7 FlakyTest (org.apache.geode.test.junit.categories.FlakyTest)6 HashSet (java.util.HashSet)5 GatewaySender (org.apache.geode.cache.wan.GatewaySender)5 Map (java.util.Map)4 AttributesFactory (org.apache.geode.cache.AttributesFactory)4 PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory)4 RegionDestroyedException (org.apache.geode.cache.RegionDestroyedException)4