Example 96 with DiskStoreFactory

Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.

The class MyGatewayEventSubstitutionFilter, method createDiskStore.

private static void createDiskStore(String asyncChannelId, String diskStoreName) {
    if (diskStoreName != null) {
        File directory = new File(asyncChannelId + "_disk_" + System.currentTimeMillis() + "_" + VM.getCurrentVMNum());
        directory.mkdir();
        File[] dirs1 = new File[] { directory };
        DiskStoreFactory dsf = cache.createDiskStoreFactory();
        dsf.setDiskDirs(dirs1);
        DiskStore ds = dsf.create(diskStoreName);
    }
}
Also used : DiskStore(org.apache.geode.cache.DiskStore) File(java.io.File) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory)
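
The method above is the minimal disk store recipe: create a dedicated directory, point the factory at it, and register the store under a name that regions or queues can later reference. A distilled sketch of the same pattern, assuming a Cache held in a cache field as in the test above (the directory naming scheme and method name are illustrative):

import java.io.File;
import org.apache.geode.cache.DiskStore;
import org.apache.geode.cache.DiskStoreFactory;

private static DiskStore createNamedDiskStore(String storeName) {
    // A dedicated directory keeps oplog files from different stores separate.
    File dir = new File(storeName + "_dir");
    dir.mkdirs();
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setDiskDirs(new File[] { dir });
    // create() registers the store with the cache under the given name.
    return dsf.create(storeName);
}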

Example 97 with DiskStoreFactory

Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.

The class ClusterConfigurationServiceDUnitTest, method testSharedConfigurationService.

@Test
public void testSharedConfigurationService() throws Exception {
    // Start the Locator and wait for shared configuration to be available
    final String testGroup = "G1";
    final String clusterLogLevel = "error";
    final String groupLogLevel = "fine";
    final String testName = getName();
    final VM locator1Vm = getHost(0).getVM(1);
    final VM dataMemberVm = getHost(0).getVM(2);
    final VM locator2Vm = getHost(0).getVM(3);
    final int[] ports = getRandomAvailableTCPPorts(3);
    final int locator1Port = ports[0];
    locator1Vm.invoke(() -> {
        final File locatorLogFile = new File(testName + "-locator-" + locator1Port + ".log");
        final Properties locatorProps = new Properties();
        locatorProps.setProperty(NAME, "Locator1");
        locatorProps.setProperty(MCAST_PORT, "0");
        locatorProps.setProperty(LOG_LEVEL, "info");
        locatorProps.setProperty(ENABLE_CLUSTER_CONFIGURATION, "true");
        try {
            final InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(locator1Port, locatorLogFile, null, locatorProps);
            WaitCriterion wc = new WaitCriterion() {

                @Override
                public boolean done() {
                    return locator.isSharedConfigurationRunning();
                }

                @Override
                public String description() {
                    return "Waiting for shared configuration to be started";
                }
            };
            waitForCriterion(wc, TIMEOUT, INTERVAL, true);
        } catch (IOException e) {
            fail("Unable to create a locator with a shared configuration", e);
        }
    });
    XmlEntity xmlEntity = dataMemberVm.invoke(() -> {
        Properties localProps = new Properties();
        localProps.setProperty(MCAST_PORT, "0");
        localProps.setProperty(LOCATORS, "localhost[" + locator1Port + "]");
        localProps.setProperty(GROUPS, testGroup);
        getSystem(localProps);
        Cache cache = getCache();
        assertNotNull(cache);
        DiskStoreFactory dsFactory = cache.createDiskStoreFactory();
        File dsDir = new File("dsDir");
        if (!dsDir.exists()) {
            dsDir.mkdir();
        }
        dsFactory.setDiskDirs(new File[] { dsDir });
        dsFactory.create(DISKSTORENAME);
        RegionFactory regionFactory = getCache().createRegionFactory(RegionShortcut.REPLICATE);
        regionFactory.create(REGION1);
        return new XmlEntity(CacheXml.REGION, "name", REGION1);
    });
    locator1Vm.invoke(() -> {
        ClusterConfigurationService sc = InternalLocator.getLocator().getSharedConfiguration();
        sc.addXmlEntity(xmlEntity, new String[] { testGroup });
        // Modify property and cache attributes
        Properties clusterProperties = new Properties();
        clusterProperties.setProperty(LOG_LEVEL, clusterLogLevel);
        XmlEntity cacheEntity = XmlEntity.builder().withType(CacheXml.CACHE).build();
        Map<String, String> cacheAttributes = new HashMap<String, String>();
        cacheAttributes.put(CacheXml.COPY_ON_READ, "true");
        sc.modifyXmlAndProperties(clusterProperties, cacheEntity, null);
        clusterProperties.setProperty(LOG_LEVEL, groupLogLevel);
        sc.modifyXmlAndProperties(clusterProperties, cacheEntity, new String[] { testGroup });
        // Add a jar
        byte[][] jarBytes = new byte[1][];
        jarBytes[0] = "Hello".getBytes();
        assertTrue(sc.addJarsToThisLocator(new String[] { "foo.jar" }, jarBytes, null));
        // Add a jar for the group
        jarBytes = new byte[1][];
        jarBytes[0] = "Hello".getBytes();
        assertTrue(sc.addJarsToThisLocator(new String[] { "bar.jar" }, jarBytes, new String[] { testGroup }));
    });
    final int locator2Port = ports[1];
    // Create another locator in VM2
    locator2Vm.invoke(() -> {
        final File locatorLogFile = new File(testName + "-locator-" + locator2Port + ".log");
        final Properties locatorProps = new Properties();
        locatorProps.setProperty(NAME, "Locator2");
        locatorProps.setProperty(MCAST_PORT, "0");
        locatorProps.setProperty(LOG_LEVEL, "info");
        locatorProps.setProperty(ENABLE_CLUSTER_CONFIGURATION, "true");
        locatorProps.setProperty(LOCATORS, "localhost[" + locator1Port + "]");
        try {
            final InternalLocator locator = (InternalLocator) Locator.startLocatorAndDS(locator2Port, locatorLogFile, null, locatorProps);
            WaitCriterion wc = new WaitCriterion() {

                @Override
                public boolean done() {
                    return locator.isSharedConfigurationRunning();
                }

                @Override
                public String description() {
                    return "Waiting for shared configuration to be started";
                }
            };
            waitForCriterion(wc, TIMEOUT, INTERVAL, true);
        } catch (IOException e) {
            fail("Unable to create a locator with a shared configuration", e);
        }
        InternalLocator locator = (InternalLocator) Locator.getLocator();
        ClusterConfigurationService sharedConfig = locator.getSharedConfiguration();
        Map<String, Configuration> entireConfiguration = sharedConfig.getEntireConfiguration();
        Configuration clusterConfig = entireConfiguration.get(ClusterConfigurationService.CLUSTER_CONFIG);
        assertNotNull(clusterConfig);
        assertNotNull(clusterConfig.getJarNames());
        assertTrue(clusterConfig.getJarNames().contains("foo.jar"));
        assertEquals(clusterLogLevel, clusterConfig.getGemfireProperties().getProperty(LOG_LEVEL));
        assertNotNull(clusterConfig.getCacheXmlContent());
        Configuration testGroupConfiguration = entireConfiguration.get(testGroup);
        assertNotNull(testGroupConfiguration);
        assertNotNull(testGroupConfiguration.getJarNames());
        assertTrue(testGroupConfiguration.getJarNames().contains("bar.jar"));
        assertEquals(groupLogLevel, testGroupConfiguration.getGemfireProperties().getProperty(LOG_LEVEL));
        assertNotNull(testGroupConfiguration.getCacheXmlContent());
        assertTrue(testGroupConfiguration.getCacheXmlContent().contains(REGION1));
        Map<String, byte[]> jarData = sharedConfig.getAllJarsFromThisLocator(entireConfiguration.keySet());
        String[] jarNames = jarData.keySet().stream().toArray(String[]::new);
        byte[][] jarBytes = jarData.values().toArray(new byte[jarNames.length][]);
        assertNotNull(jarNames);
        assertNotNull(jarBytes);
        sharedConfig.deleteXmlEntity(new XmlEntity(CacheXml.REGION, "name", REGION1), new String[] { testGroup });
        sharedConfig.removeJars(new String[] { "foo.jar" }, null);
        sharedConfig.removeJars(null, null);
    });
    dataMemberVm.invoke(() -> {
        Set<String> groups = new HashSet<String>();
        groups.add(testGroup);
        ConfigurationRequest configRequest = new ConfigurationRequest(groups);
        ConfigurationResponse configResponse = (ConfigurationResponse) new TcpClient().requestToServer(InetAddress.getByName("localhost"), locator2Port, configRequest, 1000);
        assertNotNull(configResponse);
        Map<String, Configuration> requestedConfiguration = configResponse.getRequestedConfiguration();
        Configuration clusterConfiguration = requestedConfiguration.get(ClusterConfigurationService.CLUSTER_CONFIG);
        assertNotNull(clusterConfiguration);
        assertEquals(0, configResponse.getJarNames().length);
        assertEquals(0, configResponse.getJars().length);
        assertTrue(clusterConfiguration.getJarNames().isEmpty());
        assertEquals(clusterLogLevel, clusterConfiguration.getGemfireProperties().getProperty(LOG_LEVEL));
        Configuration testGroupConfiguration = requestedConfiguration.get(testGroup);
        assertNotNull(testGroupConfiguration);
        assertFalse(testGroupConfiguration.getCacheXmlContent().contains(REGION1));
        assertTrue(testGroupConfiguration.getJarNames().isEmpty());
        assertEquals(groupLogLevel, testGroupConfiguration.getGemfireProperties().getProperty(LOG_LEVEL));
        GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
        Map<InternalDistributedMember, Collection<String>> locatorsWithSharedConfiguration = cache.getDistributionManager().getAllHostedLocatorsWithSharedConfiguration();
        assertFalse(locatorsWithSharedConfiguration.isEmpty());
        assertEquals(2, locatorsWithSharedConfiguration.size());
        Set<InternalDistributedMember> locatorMembers = locatorsWithSharedConfiguration.keySet();
        for (InternalDistributedMember locatorMember : locatorMembers) {
            System.out.println(locatorMember);
        }
        return null;
    });
}
Also used : ConfigurationResponse(org.apache.geode.management.internal.configuration.messages.ConfigurationResponse) Configuration(org.apache.geode.management.internal.configuration.domain.Configuration) ConfigurationProperties(org.apache.geode.distributed.ConfigurationProperties) RegionFactory(org.apache.geode.cache.RegionFactory) TcpClient(org.apache.geode.distributed.internal.tcpserver.TcpClient) GemFireCacheImpl(org.apache.geode.internal.cache.GemFireCacheImpl) IOException(java.io.IOException) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) XmlEntity(org.apache.geode.management.internal.configuration.domain.XmlEntity) InternalLocator(org.apache.geode.distributed.internal.InternalLocator) WaitCriterion(org.apache.geode.test.dunit.WaitCriterion) ClusterConfigurationService(org.apache.geode.distributed.internal.ClusterConfigurationService) InternalDistributedMember(org.apache.geode.distributed.internal.membership.InternalDistributedMember) ConfigurationRequest(org.apache.geode.management.internal.configuration.messages.ConfigurationRequest) VM(org.apache.geode.test.dunit.VM) File(java.io.File) Cache(org.apache.geode.cache.Cache) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
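
The data member above creates DISKSTORENAME before creating REGION1, and the locator then publishes the region's XML to the shared configuration via addXmlEntity. The test's region is a plain REPLICATE, so the store goes unused by it; binding a persistent region to the store is a small change. A hedged sketch (region name and generics are illustrative; REPLICATE_PERSISTENT is Geode's shortcut for a persistent replicated region):

RegionFactory<String, String> rf =
    cache.createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT);
rf.setDiskStoreName(DISKSTORENAME); // must name a DiskStore created beforehand
Region<String, String> region = rf.create("persistentRegion");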

Example 98 with DiskStoreFactory

Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.

The class SerialGatewaySenderQueueDUnitTest, method test_ValidateSerialGatewaySenderQueueAttributes_1.

/**
   * Test to validate the serial gateway sender queue's diskSynchronous attribute when persistence
   * of the sender is enabled.
   */
@Test
public void test_ValidateSerialGatewaySenderQueueAttributes_1() {
    Integer localLocPort = (Integer) vm0.invoke(() -> WANTestBase.createFirstLocatorWithDSId(1));
    Integer remoteLocPort = (Integer) vm1.invoke(() -> WANTestBase.createFirstRemoteLocator(2, localLocPort));
    WANTestBase test = new WANTestBase(getTestMethodName());
    Properties props = test.getDistributedSystemProperties();
    props.setProperty(MCAST_PORT, "0");
    props.setProperty(LOCATORS, "localhost[" + localLocPort + "]");
    InternalDistributedSystem ds = test.getSystem(props);
    cache = CacheFactory.create(ds);
    File directory = new File("TKSender" + "_disk_" + System.currentTimeMillis() + "_" + VM.getCurrentVMNum());
    directory.mkdir();
    File[] dirs1 = new File[] { directory };
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setDiskDirs(dirs1);
    DiskStore diskStore = dsf.create("FORNY");
    GatewaySenderFactory fact = cache.createGatewaySenderFactory();
    fact.setBatchConflationEnabled(true);
    fact.setBatchSize(200);
    fact.setBatchTimeInterval(300);
    // enable the persistence
    fact.setPersistenceEnabled(true);
    fact.setDiskSynchronous(true);
    fact.setDiskStoreName("FORNY");
    fact.setMaximumQueueMemory(200);
    fact.setAlertThreshold(1200);
    GatewayEventFilter myEventFilter1 = new MyGatewayEventFilter1();
    fact.addGatewayEventFilter(myEventFilter1);
    GatewayTransportFilter myStreamFilter1 = new MyGatewayTransportFilter1();
    fact.addGatewayTransportFilter(myStreamFilter1);
    GatewayTransportFilter myStreamFilter2 = new MyGatewayTransportFilter2();
    fact.addGatewayTransportFilter(myStreamFilter2);
    final IgnoredException exTKSender = IgnoredException.addIgnoredException("Could not connect");
    try {
        GatewaySender sender1 = fact.create("TKSender", 2);
        AttributesFactory factory = new AttributesFactory();
        factory.addGatewaySenderId(sender1.getId());
        factory.setDataPolicy(DataPolicy.PARTITION);
        Region region = cache.createRegionFactory(factory.create()).create("test_ValidateGatewaySenderAttributes");
        Set<GatewaySender> senders = cache.getGatewaySenders();
        assertEquals(1, senders.size());
        GatewaySender gatewaySender = senders.iterator().next();
        Set<RegionQueue> regionQueues = ((AbstractGatewaySender) gatewaySender).getQueues();
        assertEquals(GatewaySender.DEFAULT_DISPATCHER_THREADS, regionQueues.size());
        RegionQueue regionQueue = regionQueues.iterator().next();
        assertTrue(regionQueue.getRegion().getAttributes().isDiskSynchronous());
    } finally {
        exTKSender.remove();
    }
}
Also used : GatewaySender(org.apache.geode.cache.wan.GatewaySender) AbstractGatewaySender(org.apache.geode.internal.cache.wan.AbstractGatewaySender) WANTestBase(org.apache.geode.internal.cache.wan.WANTestBase) ConfigurationProperties(org.apache.geode.distributed.ConfigurationProperties) Properties(java.util.Properties) MyGatewayTransportFilter1(org.apache.geode.cache30.MyGatewayTransportFilter1) MyGatewayTransportFilter2(org.apache.geode.cache30.MyGatewayTransportFilter2) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) RegionQueue(org.apache.geode.internal.cache.RegionQueue) DiskStore(org.apache.geode.cache.DiskStore) AttributesFactory(org.apache.geode.cache.AttributesFactory) MyGatewayEventFilter1(org.apache.geode.cache30.MyGatewayEventFilter1) GatewaySenderFactory(org.apache.geode.cache.wan.GatewaySenderFactory) IgnoredException(org.apache.geode.test.dunit.IgnoredException) Region(org.apache.geode.cache.Region) InternalDistributedSystem(org.apache.geode.distributed.internal.InternalDistributedSystem) GatewayTransportFilter(org.apache.geode.cache.wan.GatewayTransportFilter) AbstractGatewaySender(org.apache.geode.internal.cache.wan.AbstractGatewaySender) File(java.io.File) GatewayEventFilter(org.apache.geode.cache.wan.GatewayEventFilter) Test(org.junit.Test) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest)
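
The final assertion holds because a persistent sender backs its queue with a region whose disk attributes mirror the factory settings. Distilled from the example, these are the calls that tie a persistent serial sender to a named disk store (the store name, sender id, and remote distributed system id are the test's own values):

GatewaySenderFactory fact = cache.createGatewaySenderFactory();
fact.setPersistenceEnabled(true); // queue contents survive member restarts
fact.setDiskSynchronous(true);    // queue writes reach disk before the operation returns
fact.setDiskStoreName("FORNY");   // must name an existing DiskStore
GatewaySender sender = fact.create("TKSender", 2); // sender id, remote DS id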

Example 99 with DiskStoreFactory

Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.

The class RebalanceOperationDUnitTest, method recoverRedundancyWithOfflinePersistence.

public void recoverRedundancyWithOfflinePersistence(final boolean simulate, final boolean useAccessor) throws Throwable {
    Host host = Host.getHost(0);
    VM vm0 = host.getVM(0);
    final VM vm1 = host.getVM(1);
    VM vm2 = host.getVM(2);
    VM vm3 = host.getVM(3);
    SerializableRunnable createPrRegion = new SerializableRunnable("createRegion") {

        public void run() {
            Cache cache = getCache();
            DiskStoreFactory dsf = cache.createDiskStoreFactory();
            DiskStore ds1 = dsf.setDiskDirs(getDiskDirs()).create(getUniqueName());
            AttributesFactory attr = new AttributesFactory();
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(1);
            paf.setRecoveryDelay(-1);
            paf.setStartupRecoveryDelay(-1);
            PartitionAttributes prAttr = paf.create();
            attr.setPartitionAttributes(prAttr);
            attr.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
            attr.setDiskSynchronous(true);
            attr.setDiskStoreName(getUniqueName());
            cache.createRegion("region1", attr.create());
        }
    };
    // Create the region in only 2 VMs
    vm0.invoke(createPrRegion);
    vm1.invoke(createPrRegion);
    VM rebalanceVM;
    SerializableRunnable createAccessor = new SerializableRunnable("createAccessor") {

        public void run() {
            Cache cache = getCache();
            DiskStoreFactory dsf = cache.createDiskStoreFactory();
            DiskStore ds1 = dsf.setDiskDirs(getDiskDirs()).create("ds1");
            AttributesFactory attr = new AttributesFactory();
            PartitionAttributesFactory paf = new PartitionAttributesFactory();
            paf.setRedundantCopies(1);
            paf.setRecoveryDelay(-1);
            paf.setStartupRecoveryDelay(-1);
            paf.setLocalMaxMemory(0);
            PartitionAttributes prAttr = paf.create();
            attr.setPartitionAttributes(prAttr);
            cache.createRegion("region1", attr.create());
        }
    };
    if (useAccessor) {
        // Create an accessor and rebalance from that VM
        vm3.invoke(createAccessor);
        rebalanceVM = vm3;
    } else {
        rebalanceVM = vm0;
    }
    // Create some buckets
    vm0.invoke(new SerializableRunnable("createSomeBuckets") {

        public void run() {
            Cache cache = getCache();
            Region region = cache.getRegion("region1");
            region.put(Integer.valueOf(1), "A");
            region.put(Integer.valueOf(2), "A");
            region.put(Integer.valueOf(3), "A");
            region.put(Integer.valueOf(4), "A");
            region.put(Integer.valueOf(5), "A");
            region.put(Integer.valueOf(6), "A");
        }
    });
    SerializableRunnable closeCache = new SerializableRunnable("close cache") {

        public void run() {
            Cache cache = getCache();
            cache.getRegion("region1").close();
        }
    };
    // Close the cache in vm1
    final Set<Integer> vm1Buckets = getBucketList("region1", vm1);
    vm1.invoke(closeCache);
    SerializableRunnable checkLowRedundancyBeforeRebalance = new SerializableRunnable("checkLowRedundancyBeforeRebalance") {

        public void run() {
            Cache cache = getCache();
            Region region = cache.getRegion("region1");
            PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
            assertEquals(6, details.getCreatedBucketCount());
            assertEquals(0, details.getActualRedundantCopies());
            assertEquals(6, details.getLowRedundancyBucketCount());
        }
    };
    SerializableRunnable checkLowRedundancyAfterRebalance = new SerializableRunnable("checkLowRedundancyAfterRebalance") {

        public void run() {
            Cache cache = getCache();
            Region region = cache.getRegion("region1");
            PartitionRegionInfo details = PartitionRegionHelper.getPartitionRegionInfo(region);
            assertEquals(6, details.getCreatedBucketCount());
            assertEquals(1, details.getActualRedundantCopies());
            assertEquals(0, details.getLowRedundancyBucketCount());
        }
    };
    // Make sure we can tell that the buckets have low redundancy
    vm0.invoke(checkLowRedundancyBeforeRebalance);
    // Now create the cache in another member
    vm2.invoke(createPrRegion);
    // Make sure we still have low redundancy
    vm0.invoke(checkLowRedundancyBeforeRebalance);
    /*
     * Simulates a rebalance if the simulation flag is set; otherwise, performs a real rebalance.
     * 
     * A rebalance will replace offline buckets, so this should restore redundancy.
     */
    rebalanceVM.invoke(new SerializableRunnable("simulateRebalance") {

        public void run() {
            Cache cache = getCache();
            ResourceManager manager = cache.getResourceManager();
            RebalanceResults results = doRebalance(simulate, manager);
            assertEquals(6, results.getTotalBucketCreatesCompleted());
            assertEquals(3, results.getTotalPrimaryTransfersCompleted());
            assertEquals(0, results.getTotalBucketTransfersCompleted());
            Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
            assertEquals(1, detailSet.size());
            PartitionRebalanceInfo details = detailSet.iterator().next();
            assertEquals(6, details.getBucketCreatesCompleted());
            assertEquals(3, details.getPrimaryTransfersCompleted());
            assertEquals(0, details.getBucketTransfersCompleted());
            Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
            assertEquals(2, afterDetails.size());
            for (PartitionMemberInfo memberDetails : afterDetails) {
                assertEquals(6, memberDetails.getBucketCount());
                assertEquals(3, memberDetails.getPrimaryCount());
            }
            if (!simulate) {
                verifyStats(manager, results);
            }
        }
    });
    Set<Integer> vm0Buckets = getBucketList("region1", vm0);
    Set<Integer> vm2Buckets = getBucketList("region1", vm2);
    // Make sure redundancy is repaired if not simulated
    if (!simulate) {
        vm0.invoke(checkLowRedundancyAfterRebalance);
    } else {
        // Otherwise, we should still have broken redundancy at this point
        vm0.invoke(checkLowRedundancyBeforeRebalance);
    }
    vm2.invoke(closeCache);
    vm0.invoke(closeCache);
    if (useAccessor) {
        vm3.invoke(closeCache);
    }
    // We need to restart both VMs at the same time, because
    // they will wait for each other before allowing operations.
    AsyncInvocation async0 = vm0.invokeAsync(createPrRegion);
    AsyncInvocation async2 = vm2.invokeAsync(createPrRegion);
    async0.getResult(30000);
    async2.getResult(30000);
    if (useAccessor) {
        vm3.invoke(createAccessor);
    }
    // pause for async bucket recovery threads to finish their work. Otherwise
    // the rebalance op may think that the other member doesn't have buckets, then
    // ask it to create them and get a negative reply because it actually does
    // have the buckets, causing the test to fail
    Wait.pause(10000);
    // Rebalance again; it should find nothing to do because the offline
    // buckets were already recovered from disk.
    if (!simulate) {
        rebalanceVM.invoke(new SerializableRunnable("rebalance") {

            public void run() {
                Cache cache = getCache();
                ResourceManager manager = cache.getResourceManager();
                RebalanceResults results = doRebalance(simulate, manager);
                assertEquals(0, results.getTotalBucketCreatesCompleted());
                assertEquals(0, results.getTotalBucketTransfersCompleted());
                Set<PartitionRebalanceInfo> detailSet = results.getPartitionRebalanceDetails();
                assertEquals(1, detailSet.size());
                PartitionRebalanceInfo details = detailSet.iterator().next();
                assertEquals(0, details.getBucketCreatesCompleted());
                assertEquals(0, details.getBucketTransfersCompleted());
                Set<PartitionMemberInfo> afterDetails = details.getPartitionMemberDetailsAfter();
                assertEquals(2, afterDetails.size());
                for (PartitionMemberInfo memberDetails : afterDetails) {
                    assertEquals(6, memberDetails.getBucketCount());
                    assertEquals(3, memberDetails.getPrimaryCount());
                }
            }
        });
        // Redundancy should be repaired.
        vm0.invoke(checkLowRedundancyAfterRebalance);
    }
    vm1.invoke(createPrRegion);
    // Look at vm0 buckets.
    assertEquals(vm0Buckets, getBucketList("region1", vm0));
    // Look at vm1 buckets.
    if (!simulate) {
        /*
         * vm1 should have no buckets because offline buckets were recovered when vm0 and vm2 were
         * rebalanced above.
         */
        assertEquals(0, getBucketList("region1", vm1).size());
    } else {
        /*
         * No rebalancing happened above because the simulation flag is on. Therefore, vm1 will have
         * recovered its buckets. We need to wait for the buckets because they might still be in the
         * middle of creation in the background.
         */
        waitForBucketList("region1", vm1, vm1Buckets);
    }
    // look at vm2 buckets
    assertEquals(vm2Buckets, getBucketList("region1", vm2));
}
Also used : Set(java.util.Set) TreeSet(java.util.TreeSet) HashSet(java.util.HashSet) SerializableRunnable(org.apache.geode.test.dunit.SerializableRunnable) PartitionAttributes(org.apache.geode.cache.PartitionAttributes) PartitionRebalanceInfo(org.apache.geode.cache.partition.PartitionRebalanceInfo) Host(org.apache.geode.test.dunit.Host) ResourceManager(org.apache.geode.cache.control.ResourceManager) AsyncInvocation(org.apache.geode.test.dunit.AsyncInvocation) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) DiskStore(org.apache.geode.cache.DiskStore) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) PartitionMemberInfo(org.apache.geode.cache.partition.PartitionMemberInfo) VM(org.apache.geode.test.dunit.VM) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) BucketRegion(org.apache.geode.internal.cache.BucketRegion) Region(org.apache.geode.cache.Region) RebalanceResults(org.apache.geode.cache.control.RebalanceResults) PartitionRegionInfo(org.apache.geode.cache.partition.PartitionRegionInfo) Cache(org.apache.geode.cache.Cache)
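
Every data member in this test builds its region from the same attribute recipe, which is what makes offline recovery work: PERSISTENT_PARTITION writes each bucket to the named disk store, while the -1 recovery delays turn off automatic redundancy repair so only an explicit rebalance restores it. Distilled from createPrRegion above (the values are the test's choices, not requirements):

PartitionAttributesFactory paf = new PartitionAttributesFactory();
paf.setRedundantCopies(1);       // keep one backup copy of every bucket
paf.setRecoveryDelay(-1);        // never auto-recover redundancy after a member loss...
paf.setStartupRecoveryDelay(-1); // ...and not at member startup either
AttributesFactory attr = new AttributesFactory();
attr.setPartitionAttributes(paf.create());
attr.setDataPolicy(DataPolicy.PERSISTENT_PARTITION); // buckets persist to the disk store
attr.setDiskSynchronous(true);
attr.setDiskStoreName(getUniqueName()); // the DiskStore created just above
cache.createRegion("region1", attr.create());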

Example 100 with DiskStoreFactory

Use of org.apache.geode.cache.DiskStoreFactory in project geode by apache.

The class HARQueueNewImplDUnitTest, method createServerCache.

public static Integer createServerCache(String ePolicy, Integer cap) throws Exception {
    new HARQueueNewImplDUnitTest().createCache(new Properties());
    AttributesFactory factory = new AttributesFactory();
    factory.setScope(Scope.DISTRIBUTED_ACK);
    factory.setDataPolicy(DataPolicy.REPLICATE);
    RegionAttributes attrs = factory.create();
    cache.createRegion(regionName, attrs);
    logger = cache.getLogger();
    int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
    CacheServer server1 = cache.addCacheServer();
    server1.setPort(port);
    server1.setNotifyBySubscription(true);
    if (ePolicy != null) {
        File overflowDirectory = new File("bsi_overflow_" + port);
        overflowDirectory.mkdir();
        DiskStoreFactory dsf = cache.createDiskStoreFactory();
        File[] dirs1 = new File[] { overflowDirectory };
        server1.getClientSubscriptionConfig().setEvictionPolicy(ePolicy);
        server1.getClientSubscriptionConfig().setCapacity(cap.intValue());
        // Specify the overflow disk store for this server's client subscription queue
        server1.getClientSubscriptionConfig().setDiskStoreName(dsf.setDiskDirs(dirs1).create("bsi").getName());
    }
    server1.start();
    return Integer.valueOf(server1.getPort());
}
Also used : AttributesFactory(org.apache.geode.cache.AttributesFactory) RegionAttributes(org.apache.geode.cache.RegionAttributes) CacheServer(org.apache.geode.cache.server.CacheServer) ConfigurationProperties(org.apache.geode.distributed.ConfigurationProperties) Properties(java.util.Properties) File(java.io.File) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory)
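
Here the disk store backs client-subscription (HA queue) overflow rather than a region: once the configured capacity is exceeded under the chosen eviction policy, queued events spill to the store named on the ClientSubscriptionConfig. Distilled from the example (the "entry" policy and capacity are illustrative values, and overflowStoreName is a placeholder for a store created as in the example; Geode also accepts "mem" to evict by megabytes):

CacheServer server = cache.addCacheServer();
server.setPort(port);
ClientSubscriptionConfig csc = server.getClientSubscriptionConfig();
csc.setEvictionPolicy("entry");          // overflow once the entry count exceeds capacity
csc.setCapacity(5);                      // in-memory limit before events go to disk
csc.setDiskStoreName(overflowStoreName); // name of a DiskStore created beforehand
server.start();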

Aggregations

DiskStoreFactory (org.apache.geode.cache.DiskStoreFactory): 132 uses
File (java.io.File): 95 uses
DiskStore (org.apache.geode.cache.DiskStore): 91 uses
Test (org.junit.Test): 86 uses
AttributesFactory (org.apache.geode.cache.AttributesFactory): 56 uses
Region (org.apache.geode.cache.Region): 46 uses
IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest): 46 uses
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 35 uses
Cache (org.apache.geode.cache.Cache): 32 uses
LocalRegion (org.apache.geode.internal.cache.LocalRegion): 24 uses
RegionFactory (org.apache.geode.cache.RegionFactory): 22 uses
SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable): 21 uses
DiskRegion (org.apache.geode.internal.cache.DiskRegion): 19 uses
Properties (java.util.Properties): 18 uses
VM (org.apache.geode.test.dunit.VM): 18 uses
LRUStatistics (org.apache.geode.internal.cache.lru.LRUStatistics): 16 uses
GatewaySenderFactory (org.apache.geode.cache.wan.GatewaySenderFactory): 12 uses
FlakyTest (org.apache.geode.test.junit.categories.FlakyTest): 11 uses
IOException (java.io.IOException): 10 uses
PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory): 10 uses