
Example 76 with DistributedSystem

use of org.apache.geode.distributed.DistributedSystem in project geode by apache.

the class PartitionedRegionSingleHopWithServerGroupDUnitTest method createClientWithLocator.

public static void createClientWithLocator(String host, int port0, String group) {
    Properties props = new Properties();
    props.setProperty(MCAST_PORT, "0");
    props.setProperty(LOCATORS, "");
    props.setProperty(LOG_FILE, "");
    PartitionedRegionSingleHopWithServerGroupDUnitTest test = new PartitionedRegionSingleHopWithServerGroupDUnitTest();
    DistributedSystem ds = test.getSystem(props);
    cache = CacheFactory.create(ds);
    assertNotNull(cache);
    CacheServerTestUtil.disableShufflingOfEndpoints();
    Pool p;
    try {
        p = PoolManager.createFactory()
            .addLocator(host, port0)
            .setServerGroup(group)
            .setPingInterval(250)
            .setSubscriptionEnabled(true)
            .setSubscriptionRedundancy(-1)
            .setReadTimeout(2000)
            .setSocketBufferSize(1000)
            .setMinConnections(6)
            .setMaxConnections(10)
            .setRetryAttempts(3)
            .create(PR_NAME);
    } finally {
        CacheServerTestUtil.enableShufflingOfEndpoints();
    }
    createRegionsInClientCache(p.getName());
}
Also used : Pool(org.apache.geode.cache.client.Pool) ConfigurationProperties(org.apache.geode.distributed.ConfigurationProperties) Properties(java.util.Properties) DistributedSystem(org.apache.geode.distributed.DistributedSystem)
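
Stripped of the DUnit scaffolding, the client-side pattern this example exercises is: connect a loner DistributedSystem (mcast-port 0, empty locators), create a Cache from it, and build a server-group-aware Pool through PoolManager. Below is a minimal sketch of that pattern; the locator address localhost/10334, the group name "group1", and the pool name "group1-pool" are placeholder values, not taken from the test.

import java.util.Properties;

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.client.Pool;
import org.apache.geode.cache.client.PoolManager;
import org.apache.geode.distributed.DistributedSystem;

public class ClientPoolSketch {
    public static void main(String[] args) {
        // Loner member: no multicast and no locators of its own; the client reaches
        // the cluster only through the pool's locator.
        Properties props = new Properties();
        props.setProperty("mcast-port", "0");
        props.setProperty("locators", "");
        DistributedSystem ds = DistributedSystem.connect(props);
        Cache cache = CacheFactory.create(ds);

        // Pool pinned to a server group; host, port, group, and pool name are placeholders.
        Pool pool = PoolManager.createFactory()
            .addLocator("localhost", 10334)
            .setServerGroup("group1")
            .setSubscriptionEnabled(true)
            .setSubscriptionRedundancy(-1)
            .create("group1-pool");

        // ... create client regions that reference pool.getName(), then do work ...

        pool.destroy();
        cache.close();
    }
}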

Example 77 with DistributedSystem

use of org.apache.geode.distributed.DistributedSystem in project geode by apache.

the class PartitionedRegionSingleHopWithServerGroupDUnitTest method createServerWithLocatorAndServerGroup2Regions.

public static int createServerWithLocatorAndServerGroup2Regions(String locator, int localMaxMemory, int redundantCopies, int totalNoofBuckets, String group) {
    Properties props = new Properties();
    props.setProperty(LOCATORS, locator);
    System.setProperty(DistributionConfig.GEMFIRE_PREFIX + "PoolImpl.honourServerGroupsInPRSingleHop", "true");
    PartitionedRegionSingleHopWithServerGroupDUnitTest test = new PartitionedRegionSingleHopWithServerGroupDUnitTest();
    DistributedSystem ds = test.getSystem(props);
    cache = CacheFactory.create(ds);
    CacheServer server = cache.addCacheServer();
    if (group.length() != 0) {
        StringTokenizer t = new StringTokenizer(group, ",");
        String[] a = new String[t.countTokens()];
        int i = 0;
        while (t.hasMoreTokens()) {
            a[i] = t.nextToken();
            i++;
        }
        server.setGroups(a);
    }
    int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
    server.setPort(port);
    server.setHostnameForClients("localhost");
    try {
        server.start();
    } catch (IOException e) {
        Assert.fail("Failed to start server ", e);
    }
    PartitionAttributesFactory paf = new PartitionAttributesFactory();
    paf.setRedundantCopies(redundantCopies).setLocalMaxMemory(localMaxMemory).setTotalNumBuckets(totalNoofBuckets);
    AttributesFactory attr = new AttributesFactory();
    attr.setPartitionAttributes(paf.create());
    region = cache.createRegion(PR_NAME, attr.create());
    assertNotNull(region);
    LogWriterUtils.getLogWriter().info("Partitioned Region " + PR_NAME + " created Successfully :" + region.toString());
    // creating colocated Regions
    paf = new PartitionAttributesFactory();
    paf.setRedundantCopies(redundantCopies).setLocalMaxMemory(localMaxMemory).setTotalNumBuckets(totalNoofBuckets).setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
    attr = new AttributesFactory();
    attr.setPartitionAttributes(paf.create());
    customerRegion = cache.createRegion("CUSTOMER", attr.create());
    assertNotNull(customerRegion);
    LogWriterUtils.getLogWriter().info("Partitioned Region CUSTOMER created Successfully :" + customerRegion.toString());
    paf = new PartitionAttributesFactory();
    paf.setRedundantCopies(redundantCopies).setLocalMaxMemory(localMaxMemory).setTotalNumBuckets(totalNoofBuckets).setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
    attr = new AttributesFactory();
    attr.setPartitionAttributes(paf.create());
    orderRegion = cache.createRegion("ORDER", attr.create());
    assertNotNull(orderRegion);
    LogWriterUtils.getLogWriter().info("Partitioned Region ORDER created Successfully :" + orderRegion.toString());
    paf = new PartitionAttributesFactory();
    paf.setRedundantCopies(redundantCopies).setLocalMaxMemory(localMaxMemory).setTotalNumBuckets(totalNoofBuckets).setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
    attr = new AttributesFactory();
    attr.setPartitionAttributes(paf.create());
    shipmentRegion = cache.createRegion("SHIPMENT", attr.create());
    assertNotNull(shipmentRegion);
    LogWriterUtils.getLogWriter().info("Partitioned Region SHIPMENT created Successfully :" + shipmentRegion.toString());
    paf = new PartitionAttributesFactory();
    paf.setRedundantCopies(redundantCopies).setLocalMaxMemory(localMaxMemory).setTotalNumBuckets(totalNoofBuckets);
    attr = new AttributesFactory();
    attr.setPartitionAttributes(paf.create());
    region2 = cache.createRegion(PR_NAME2, attr.create());
    assertNotNull(region2);
    LogWriterUtils.getLogWriter().info("Partitioned Region " + PR_NAME2 + " created Successfully :" + region2.toString());
    paf = new PartitionAttributesFactory();
    paf.setRedundantCopies(redundantCopies).setLocalMaxMemory(localMaxMemory).setTotalNumBuckets(totalNoofBuckets).setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
    attr = new AttributesFactory();
    attr.setPartitionAttributes(paf.create());
    customerRegion2 = cache.createRegion(CUSTOMER2, attr.create());
    assertNotNull(customerRegion2);
    LogWriterUtils.getLogWriter().info("Partitioned Region CUSTOMER2 created Successfully :" + customerRegion2.toString());
    paf = new PartitionAttributesFactory();
    paf.setRedundantCopies(redundantCopies).setLocalMaxMemory(localMaxMemory).setTotalNumBuckets(totalNoofBuckets).setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
    attr = new AttributesFactory();
    attr.setPartitionAttributes(paf.create());
    orderRegion2 = cache.createRegion(ORDER2, attr.create());
    assertNotNull(orderRegion2);
    LogWriterUtils.getLogWriter().info("Partitioned Region ORDER2 created Successfully :" + orderRegion2.toString());
    paf = new PartitionAttributesFactory();
    paf.setRedundantCopies(redundantCopies).setLocalMaxMemory(localMaxMemory).setTotalNumBuckets(totalNoofBuckets).setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
    attr = new AttributesFactory();
    attr.setPartitionAttributes(paf.create());
    shipmentRegion2 = cache.createRegion(SHIPMENT2, attr.create());
    assertNotNull(shipmentRegion2);
    LogWriterUtils.getLogWriter().info("Partitioned Region SHIPMENT2 created Successfully :" + shipmentRegion2.toString());
    return port;
}
Also used : StringTokenizer(java.util.StringTokenizer) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) CacheServer(org.apache.geode.cache.server.CacheServer) IOException(java.io.IOException) ConfigurationProperties(org.apache.geode.distributed.ConfigurationProperties) Properties(java.util.Properties) DistributedSystem(org.apache.geode.distributed.DistributedSystem)
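
The server-side counterpart reduces to: connect the DistributedSystem to a locator, add a CacheServer whose server groups come from the comma-separated group string, start it on an ephemeral port, and create partitioned regions via PartitionAttributesFactory. A condensed sketch with a single region follows; the locator string, group names, and region name are placeholders.

import java.util.Properties;

import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.server.CacheServer;
import org.apache.geode.distributed.DistributedSystem;

public class ServerGroupSketch {
    public static void main(String[] args) throws Exception {
        // Locator address is a placeholder.
        Properties props = new Properties();
        props.setProperty("mcast-port", "0");
        props.setProperty("locators", "localhost[10334]");
        DistributedSystem ds = DistributedSystem.connect(props);
        Cache cache = CacheFactory.create(ds);

        // Cache server that belongs to two server groups (placeholder names).
        CacheServer server = cache.addCacheServer();
        server.setGroups(new String[] { "group1", "group2" });
        server.setPort(0); // 0 = let the server pick an ephemeral port
        server.start();

        // Partitioned region: 1 redundant copy, 113 buckets (the Geode default).
        PartitionAttributesFactory paf = new PartitionAttributesFactory();
        paf.setRedundantCopies(1).setTotalNumBuckets(113);
        AttributesFactory attr = new AttributesFactory();
        attr.setPartitionAttributes(paf.create());
        Region region = cache.createRegion("examplePR", attr.create());

        System.out.println("Server on port " + server.getPort() + " hosting " + region.getFullPath());
    }
}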

Example 78 with DistributedSystem

use of org.apache.geode.distributed.DistributedSystem in project geode by apache.

the class Shipment method createServerWithLocator.

public static int createServerWithLocator(String locString, int redundantCopies, int totalNoofBuckets) {
    Properties props = new Properties();
    props.setProperty(LOCATORS, locString);
    PartitionedRegionSingleHopDUnitTest test = new PartitionedRegionSingleHopDUnitTest();
    DistributedSystem ds = test.getSystem(props);
    cache = CacheFactory.create(ds);
    CacheServer server = cache.addCacheServer();
    server.setPort(0);
    server.setHostnameForClients("localhost");
    try {
        server.start();
    } catch (IOException e) {
        Assert.fail("Failed to start server ", e);
    }
    PartitionAttributesFactory paf = new PartitionAttributesFactory();
    paf.setRedundantCopies(redundantCopies).setTotalNumBuckets(totalNoofBuckets);
    AttributesFactory attr = new AttributesFactory();
    attr.setPartitionAttributes(paf.create());
    attr.setConcurrencyChecksEnabled(true);
    region = cache.createRegion(PR_NAME, attr.create());
    assertNotNull(region);
    LogWriterUtils.getLogWriter().info("Partitioned Region " + PR_NAME + " created Successfully :" + region.toString());
    // creating colocated Regions
    paf = new PartitionAttributesFactory();
    paf.setRedundantCopies(redundantCopies).setTotalNumBuckets(totalNoofBuckets).setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
    attr = new AttributesFactory();
    attr.setPartitionAttributes(paf.create());
    attr.setConcurrencyChecksEnabled(true);
    customerRegion = cache.createRegion("CUSTOMER", attr.create());
    assertNotNull(customerRegion);
    LogWriterUtils.getLogWriter().info("Partitioned Region CUSTOMER created Successfully :" + customerRegion.toString());
    paf = new PartitionAttributesFactory();
    paf.setRedundantCopies(redundantCopies).setTotalNumBuckets(totalNoofBuckets).setColocatedWith("CUSTOMER").setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
    attr = new AttributesFactory();
    attr.setPartitionAttributes(paf.create());
    attr.setConcurrencyChecksEnabled(true);
    orderRegion = cache.createRegion("ORDER", attr.create());
    assertNotNull(orderRegion);
    LogWriterUtils.getLogWriter().info("Partitioned Region ORDER created Successfully :" + orderRegion.toString());
    paf = new PartitionAttributesFactory();
    paf.setRedundantCopies(redundantCopies).setTotalNumBuckets(totalNoofBuckets).setColocatedWith("ORDER").setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
    attr = new AttributesFactory();
    attr.setPartitionAttributes(paf.create());
    attr.setConcurrencyChecksEnabled(true);
    shipmentRegion = cache.createRegion("SHIPMENT", attr.create());
    assertNotNull(shipmentRegion);
    LogWriterUtils.getLogWriter().info("Partitioned Region SHIPMENT created Successfully :" + shipmentRegion.toString());
    return server.getPort();
}
Also used : PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) CacheServer(org.apache.geode.cache.server.CacheServer) IOException(java.io.IOException) ConfigurationProperties(org.apache.geode.distributed.ConfigurationProperties) Properties(java.util.Properties) DistributedSystem(org.apache.geode.distributed.DistributedSystem)
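
The detail worth isolating from this example is the colocation chain: ORDER is colocated with CUSTOMER and SHIPMENT with ORDER, and all three regions share the same bucket count, redundancy, and PartitionResolver so that related entries land in the same buckets. The sketch below reproduces that wiring with a hypothetical KeyResolver standing in for the test's CustomerIDPartitionResolver.

import java.util.Properties;

import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.EntryOperation;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.PartitionResolver;
import org.apache.geode.cache.Region;
import org.apache.geode.distributed.DistributedSystem;

public class ColocationSketch {

    // Hypothetical resolver: routes on the key's string form. The tests use their own
    // CustomerIDPartitionResolver; any resolver shared by all three regions plays the same role.
    static class KeyResolver implements PartitionResolver {
        public Object getRoutingObject(EntryOperation op) {
            return op.getKey().toString();
        }
        public String getName() {
            return "KeyResolver";
        }
        public void close() {
        }
    }

    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("mcast-port", "0");
        props.setProperty("locators", "");
        DistributedSystem ds = DistributedSystem.connect(props);
        Cache cache = CacheFactory.create(ds);

        Region customer = createPartitionedRegion(cache, "CUSTOMER", null);
        Region order = createPartitionedRegion(cache, "ORDER", "CUSTOMER");
        Region shipment = createPartitionedRegion(cache, "SHIPMENT", "ORDER");
        System.out.println("Created " + customer + ", " + order + ", " + shipment);
        cache.close();
    }

    static Region createPartitionedRegion(Cache cache, String name, String colocatedWith) {
        PartitionAttributesFactory paf = new PartitionAttributesFactory();
        // Colocated regions must agree on bucket count, redundancy, and resolver.
        paf.setRedundantCopies(1).setTotalNumBuckets(10).setPartitionResolver(new KeyResolver());
        if (colocatedWith != null) {
            paf.setColocatedWith(colocatedWith);
        }
        AttributesFactory attr = new AttributesFactory();
        attr.setPartitionAttributes(paf.create());
        return cache.createRegion(name, attr.create());
    }
}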

Example 79 with DistributedSystem

use of org.apache.geode.distributed.DistributedSystem in project geode by apache.

the class MultiThreadedOplogPerJUnitPerformanceTest method testPerf.

@Test
public void testPerf() {
    Properties props = new Properties();
    props.setProperty(MCAST_PORT, "0");
    props.setProperty(LOCATORS, "");
    props.setProperty(LOG_LEVEL, "info");
    DistributedSystem ds = DistributedSystem.connect(props);
    Cache cache = null;
    try {
        cache = CacheFactory.create(ds);
    } catch (Exception e) {
        e.printStackTrace();
    }
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    AttributesFactory factory = new AttributesFactory();
    factory.setPersistBackup(false);
    factory.setScope(Scope.LOCAL);
    factory.setEvictionAttributes(EvictionAttributes.createLRUEntryAttributes(10, EvictionAction.OVERFLOW_TO_DISK));
    // Properties props1 = new Properties();
    factory.setDiskSynchronous(true);
    dsf.setAutoCompact(false);
    ((DiskStoreFactoryImpl) dsf).setMaxOplogSizeInBytes(200000000);
    dsf.setDiskDirs(dirs);
    factory.setDiskStoreName(dsf.create("perfTestRegion").getName());
    try {
        region = cache.createVMRegion("perfTestRegion", factory.createRegionAttributes());
    } catch (Exception e) {
        e.printStackTrace();
    }
    Thread[] threads = new Thread[numberOfThreads];
    for (int i = 0; i < numberOfThreads; i++) {
        // .start();
        threads[i] = new Thread(new Writer(i));
        threads[i].start();
    }
    for (int i = 0; i < numberOfThreads; i++) {
        ThreadUtils.join(threads[i], 30 * 1000);
    }
    long totalPuts = ((long) numberOfIterations * numberOfKeysPerThread * numberOfThreads);
    System.out.println(" total puts is " + totalPuts);
    System.out.println(" total time in milliseconds is " + totalTime);
    System.out.println(" writes per second is " + (totalPuts * 1000 * numberOfThreads) / (totalTime));
    region.destroyRegion();
}
Also used : DiskStoreFactoryImpl(org.apache.geode.internal.cache.DiskStoreFactoryImpl) ConfigurationProperties(org.apache.geode.distributed.ConfigurationProperties) Properties(java.util.Properties) DistributedSystem(org.apache.geode.distributed.DistributedSystem) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)
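
Setting the multithreaded driver aside, the configuration this test exercises is a LOCAL, non-persistent region whose LRU-evicted entries overflow to an oplog-backed disk store. A minimal sketch of just that configuration follows, staying on the public DiskStoreFactory API (the test casts to the internal DiskStoreFactoryImpl only to raise the oplog size limit); the directory and names are placeholders.

import java.io.File;
import java.util.Properties;

import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.DiskStoreFactory;
import org.apache.geode.cache.EvictionAction;
import org.apache.geode.cache.EvictionAttributes;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.Scope;
import org.apache.geode.distributed.DistributedSystem;

public class OverflowRegionSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("mcast-port", "0");
        props.setProperty("locators", "");
        DistributedSystem ds = DistributedSystem.connect(props);
        Cache cache = CacheFactory.create(ds);

        // Disk store backing the overflow; the directory is a placeholder and must exist.
        File dir = new File("overflow-dir");
        dir.mkdirs();
        DiskStoreFactory dsf = cache.createDiskStoreFactory();
        dsf.setAutoCompact(false);
        dsf.setDiskDirs(new File[] { dir });
        String diskStoreName = dsf.create("overflowStore").getName();

        // LOCAL region that keeps at most 10 entries in memory and overflows the rest to disk.
        AttributesFactory factory = new AttributesFactory();
        factory.setScope(Scope.LOCAL);
        factory.setDiskSynchronous(true);
        factory.setEvictionAttributes(
            EvictionAttributes.createLRUEntryAttributes(10, EvictionAction.OVERFLOW_TO_DISK));
        factory.setDiskStoreName(diskStoreName);
        Region region = cache.createRegion("overflowRegion", factory.create());

        for (int i = 0; i < 100; i++) {
            region.put("key-" + i, "value-" + i); // entries beyond the in-memory limit go to disk
        }
        cache.close();
    }
}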

Example 80 with DistributedSystem

use of org.apache.geode.distributed.DistributedSystem in project geode by apache.

the class PRFunctionExecutionDUnitTest method testFunctionExecution.

/**
   * Test to validate that the function execution is successful on PR with Loner Distributed System
   */
@Test
public void testFunctionExecution() throws Exception {
    final String rName = getUniqueName();
    Host host = Host.getHost(0);
    final VM datastore = host.getVM(3);
    datastore.invoke(new SerializableCallable("Create PR with Function Factory") {

        public Object call() throws Exception {
            Properties props = new Properties();
            props.setProperty(MCAST_PORT, "0");
            props.setProperty(LOCATORS, "");
            DistributedSystem ds = getSystem(props);
            assertNotNull(ds);
            ds.disconnect();
            ds = getSystem(props);
            cache = CacheFactory.create(ds);
            assertNotNull(cache);
            RegionAttributes ra = PartitionedRegionTestHelper.createRegionAttrsForPR(0, 10);
            AttributesFactory raf = new AttributesFactory(ra);
            PartitionAttributesImpl pa = new PartitionAttributesImpl();
            pa.setAll(ra.getPartitionAttributes());
            raf.setPartitionAttributes(pa);
            Region pr = cache.createRegion(rName, raf.create());
            final String testKey = "execKey";
            final Set testKeysSet = new HashSet();
            testKeysSet.add(testKey);
            Function function = new TestFunction(true, TestFunction.TEST_FUNCTION1);
            FunctionService.registerFunction(function);
            Execution dataSet = FunctionService.onRegion(pr);
            ResultCollector result = dataSet.setArguments(Boolean.TRUE).withFilter(testKeysSet).execute(function);
            System.out.println("KBKBKB : Result I got : " + result.getResult());
            return Boolean.TRUE;
        }
    });
}
Also used : LocalDataSet(org.apache.geode.internal.cache.LocalDataSet) Set(java.util.Set) HashSet(java.util.HashSet) TestFunction(org.apache.geode.internal.cache.functions.TestFunction) RegionAttributes(org.apache.geode.cache.RegionAttributes) Host(org.apache.geode.test.dunit.Host) ConfigurationProperties(org.apache.geode.distributed.ConfigurationProperties) Properties(java.util.Properties) InternalDistributedSystem(org.apache.geode.distributed.internal.InternalDistributedSystem) DistributedSystem(org.apache.geode.distributed.DistributedSystem) IgnoredException(org.apache.geode.test.dunit.IgnoredException) FunctionException(org.apache.geode.cache.execute.FunctionException) Function(org.apache.geode.cache.execute.Function) TestFunction(org.apache.geode.internal.cache.functions.TestFunction) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) PartitionAttributesImpl(org.apache.geode.internal.cache.PartitionAttributesImpl) Execution(org.apache.geode.cache.execute.Execution) VM(org.apache.geode.test.dunit.VM) SerializableCallable(org.apache.geode.test.dunit.SerializableCallable) PartitionedRegion(org.apache.geode.internal.cache.PartitionedRegion) Region(org.apache.geode.cache.Region) ResultCollector(org.apache.geode.cache.execute.ResultCollector) HashSet(java.util.HashSet) DistributedTest(org.apache.geode.test.junit.categories.DistributedTest) Test(org.junit.Test)
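
Separated from the DUnit VM management, the function-execution flow is: register a Function, build an Execution with FunctionService.onRegion, attach arguments and a key filter, then block on the ResultCollector. The sketch below follows that flow with a trivial, hypothetical EchoFunction in place of the test's TestFunction helper.

import java.util.Collections;
import java.util.Properties;

import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.execute.Execution;
import org.apache.geode.cache.execute.Function;
import org.apache.geode.cache.execute.FunctionContext;
import org.apache.geode.cache.execute.FunctionService;
import org.apache.geode.cache.execute.ResultCollector;
import org.apache.geode.distributed.DistributedSystem;

public class RegionFunctionSketch {

    // Hypothetical function that simply echoes its arguments back to the caller.
    static class EchoFunction implements Function {
        public void execute(FunctionContext context) {
            context.getResultSender().lastResult(context.getArguments());
        }
        public String getId() {
            return "EchoFunction";
        }
        public boolean hasResult() {
            return true;
        }
        public boolean optimizeForWrite() {
            return false;
        }
        public boolean isHA() {
            return false;
        }
    }

    public static void main(String[] args) {
        Properties props = new Properties();
        props.setProperty("mcast-port", "0");
        props.setProperty("locators", "");
        DistributedSystem ds = DistributedSystem.connect(props);
        Cache cache = CacheFactory.create(ds);

        // Small partitioned region to execute the function against.
        PartitionAttributesFactory paf = new PartitionAttributesFactory();
        paf.setTotalNumBuckets(10);
        AttributesFactory attr = new AttributesFactory();
        attr.setPartitionAttributes(paf.create());
        Region region = cache.createRegion("examplePR", attr.create());
        region.put("execKey", "execValue");

        Function function = new EchoFunction();
        FunctionService.registerFunction(function);

        Execution execution = FunctionService.onRegion(region)
            .setArguments(Boolean.TRUE)
            .withFilter(Collections.singleton("execKey"));
        ResultCollector collector = execution.execute(function);
        System.out.println("Function result: " + collector.getResult());
        cache.close();
    }
}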

Aggregations

DistributedSystem (org.apache.geode.distributed.DistributedSystem): 250
Properties (java.util.Properties): 102
InternalDistributedSystem (org.apache.geode.distributed.internal.InternalDistributedSystem): 65
ConfigurationProperties (org.apache.geode.distributed.ConfigurationProperties): 61
Test (org.junit.Test): 59
Cache (org.apache.geode.cache.Cache): 55
AttributesFactory (org.apache.geode.cache.AttributesFactory): 34
IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest): 30
Region (org.apache.geode.cache.Region): 24
IOException (java.io.IOException): 20
CacheServer (org.apache.geode.cache.server.CacheServer): 20
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 20
InternalCache (org.apache.geode.internal.cache.InternalCache): 16
IgnoredException (org.apache.geode.test.dunit.IgnoredException): 16
ArrayList (java.util.ArrayList): 15
LogWriter (org.apache.geode.LogWriter): 15
DistributedMember (org.apache.geode.distributed.DistributedMember): 15
LocalRegion (org.apache.geode.internal.cache.LocalRegion): 15
RegionAttributes (org.apache.geode.cache.RegionAttributes): 14
Pool (org.apache.geode.cache.client.Pool): 14