Example 96 with DiskStore

Use of org.apache.geode.cache.DiskStore in project geode by apache.

From the class DiskOfflineCompactionJUnitTest, method testTwoEntriesNoCompact.

@Test
public void testTwoEntriesNoCompact() throws Exception {
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setAutoCompact(false);
    String name = "testTwoEntriesNoCompact";
    DiskStore diskStore = dsf.create(name);
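    // Each oplog is a pair of files: a .crf (create record file) and a .drf
    // (delete record file); the store-wide .if init file tracks oplog metadata.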
    File crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.crf");
    File drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.drf");
    File ifFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + ".if");
    AttributesFactory af = new AttributesFactory();
    af.setDiskStoreName(name);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    Region r = cache.createRegion("r", af.create());
    int extra_byte_num_per_entry = InternalDataSerializer.calculateBytesForTSandDSID(getDSID((LocalRegion) r));
    r.put("key1", "value1");
    r.put("key2", "value2");
    cache.close();
    ds.disconnect();
    System.out.println("empty rvv size = " + getRVVSize(0, null, false));
    System.out.println("empty rvvgc size = " + getRVVSize(1, new int[] { 0 }, true));
    System.out.println("1 member rvv size = " + getRVVSize(1, new int[] { 1 }, false));
    System.out.println("2 member rvv size = " + getRVVSize(2, new int[] { 1, 1 }, false));
    DiskStoreImpl.validate(name, diskStore.getDiskDirs());
    int headerSize = Oplog.OPLOG_MAGIC_SEQ_REC_SIZE + Oplog.OPLOG_DISK_STORE_REC_SIZE;
    int crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, false) + Oplog.OPLOG_NEW_ENTRY_BASE_REC_SIZE;
    int createsize1 = getSize4Create(extra_byte_num_per_entry, "key1", "value1");
    int createsize2 = getSize4Create(extra_byte_num_per_entry, "key2", "value2");
    assertEquals(crfsize + createsize1 + createsize2, crfFile.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, true), drfFile.length());
    long originalIfLength = ifFile.length();
    DiskStoreImpl dsi = DiskStoreImpl.offlineCompact(name, diskStore.getDiskDirs(), false, -1);
    assertEquals(0, dsi.getDeadRecordCount());
    assertEquals(2, dsi.getLiveEntryCount());
    assertEquals(crfsize + createsize1 + createsize2, crfFile.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, new int[] { 0 }, true), drfFile.length());
    // offline compaction should not have created a new oplog.
    assertEquals(originalIfLength, ifFile.length());
    connectDSandCache();
    dsf = cache.createDiskStoreFactory();
    diskStore = dsf.create(name);
    af = new AttributesFactory();
    af.setDiskStoreName(name);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    r = cache.createRegion("r", af.create());
    assertEquals(2, r.size());
    assertEquals("value1", r.get("key1"));
    assertEquals("value2", r.get("key2"));
    // if the test passed, clean up the files
    r.destroyRegion();
    diskStore.destroy();
}
Also used : DiskStore(org.apache.geode.cache.DiskStore) AttributesFactory(org.apache.geode.cache.AttributesFactory) Region(org.apache.geode.cache.Region) File(java.io.File) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)
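
For orientation, the same store-plus-region setup can be written against Geode's public API alone, without the test's internal helpers. The following is a minimal sketch, not code from the project: the mcast-port property, the store name sketchStore, the disk directory, and the region name are all illustrative choices.

import java.io.File;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.DiskStore;
import org.apache.geode.cache.DiskStoreFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class DiskStoreSketch {
    public static void main(String[] args) {
        // Stand-alone cache for a local experiment.
        Cache cache = new CacheFactory().set("mcast-port", "0").create();

        // A disk store with automatic compaction disabled, as in the test above.
        DiskStoreFactory dsf = cache.createDiskStoreFactory();
        dsf.setAutoCompact(false);
        dsf.setDiskDirs(new File[] { new File(".") });
        DiskStore diskStore = dsf.create("sketchStore");

        // A persistent replicate region routes its writes through the disk store;
        // the entries end up in the BACKUPsketchStore_N.crf/.drf oplog files.
        Region<String, String> r = cache
                .<String, String>createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT)
                .setDiskStoreName(diskStore.getName())
                .create("r");
        r.put("key1", "value1");
        r.put("key2", "value2");

        // The entries survive a cache close and are recovered on restart.
        cache.close();
    }
}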

Example 97 with DiskStore

Use of org.apache.geode.cache.DiskStore in project geode by apache.

From the class DiskOfflineCompactionJUnitTest, method testTwoEntriesWithUpdateAndDestroy.

@Test
public void testTwoEntriesWithUpdateAndDestroy() throws Exception {
    DiskStoreFactory dsf = cache.createDiskStoreFactory();
    dsf.setAutoCompact(false);
    String name = "testTwoEntriesWithUpdateAndDestroy";
    DiskStore diskStore = dsf.create(name);
    File crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.crf");
    File drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.drf");
    File krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.krf");
    File ifFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + ".if");
    AttributesFactory af = new AttributesFactory();
    af.setDiskStoreName(name);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    Region r = cache.createRegion("r", af.create());
    int extra_byte_num_per_entry = InternalDataSerializer.calculateBytesForTSandDSID(getDSID((LocalRegion) r));
    r.put("key1", "value1");
    r.put("key2", "value2");
    r.put("key1", "update1");
    r.put("key2", "update2");
    r.remove("key2");
    cache.close();
    ds.disconnect();
    DiskStoreImpl.validate(name, diskStore.getDiskDirs());
    int headerSize = Oplog.OPLOG_MAGIC_SEQ_REC_SIZE + Oplog.OPLOG_DISK_STORE_REC_SIZE;
    int crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, false) + Oplog.OPLOG_NEW_ENTRY_BASE_REC_SIZE;
    int createsize1 = getSize4Create(extra_byte_num_per_entry, "key1", "value1");
    int createsize2 = getSize4Create(extra_byte_num_per_entry, "key2", "value2");
    int updatesize1 = getSize4UpdateWithoutKey(extra_byte_num_per_entry, "update1");
    int updatesize2 = getSize4UpdateWithoutKey(extra_byte_num_per_entry, "update2");
    // 1 tombstone without key
    int tombstonesize1 = getSize4TombstoneWithoutKey(extra_byte_num_per_entry);
    assertEquals(crfsize + createsize1 + createsize2 + updatesize1 + updatesize2 + tombstonesize1, crfFile.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, true), drfFile.length());
    long originalIfLength = ifFile.length();
    DiskStoreImpl dsi = DiskStoreImpl.offlineCompact(name, diskStore.getDiskDirs(), false, -1);
    assertEquals(3, dsi.getDeadRecordCount());
    assertEquals(2, dsi.getLiveEntryCount());
    assertEquals(false, crfFile.exists());
    assertEquals(false, drfFile.exists());
    assertEquals(false, krfFile.exists());
    crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.crf");
    drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.drf");
    krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.krf");
    assertEquals(true, krfFile.exists());
    assertEquals(true, crfFile.exists());
    assertEquals(true, drfFile.exists());
    // compare file sizes
    // After offline compaction, only one update with key and one tombstone with key are left
    // No more OPLOG_NEW_ENTRY_BASE_REC.
    // The crf now contains an RVV with one entry
    crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 1 }, false);
    updatesize1 += getStrSizeInOplog("key1");
    tombstonesize1 += getStrSizeInOplog("key2");
    assertEquals(crfsize + updatesize1 + tombstonesize1, crfFile.length());
    assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(1, new int[] { 0 }, true), drfFile.length());
    assertEquals(originalIfLength, ifFile.length());
    connectDSandCache();
    dsf = cache.createDiskStoreFactory();
    diskStore = dsf.create(name);
    af = new AttributesFactory();
    af.setDiskStoreName(name);
    af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
    r = cache.createRegion("r", af.create());
    assertEquals(1, r.size());
    assertEquals("update1", r.get("key1"));
    // if the test passed, clean up the files
    r.destroyRegion();
    diskStore.destroy();
}
Also used : DiskStore(org.apache.geode.cache.DiskStore) AttributesFactory(org.apache.geode.cache.AttributesFactory) Region(org.apache.geode.cache.Region) File(java.io.File) DiskStoreFactory(org.apache.geode.cache.DiskStoreFactory) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)
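
The test drives compaction through the internal DiskStoreImpl.offlineCompact hook. Against the public API, the closest analogue is forced compaction on a live store, sketched below under the same caveats (store name and threshold are illustrative); for a store whose member is shut down, gfsh's compact offline-disk-store command covers the offline path this test exercises.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.DiskStore;
import org.apache.geode.cache.DiskStoreFactory;

public class ForceCompactionSketch {
    public static void main(String[] args) {
        Cache cache = new CacheFactory().set("mcast-port", "0").create();

        DiskStoreFactory dsf = cache.createDiskStoreFactory();
        dsf.setAutoCompact(false);
        // forceCompaction() is refused unless it is explicitly allowed.
        dsf.setAllowForceCompaction(true);
        // An oplog becomes eligible once at least this percentage of it is garbage.
        dsf.setCompactionThreshold(50);
        DiskStore store = dsf.create("sketchStore");

        // ... create a persistent region on "sketchStore" and perform puts,
        // updates, and destroys so that dead records accumulate ...

        // Returns true if a compaction was actually performed.
        boolean compacted = store.forceCompaction();
        System.out.println("compacted: " + compacted);
        cache.close();
    }
}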

Example 98 with DiskStore

Use of org.apache.geode.cache.DiskStore in project geode by apache.

From the class DiskRegOplogSwtchingAndRollerJUnitTest, method testSyncPersistRegionDAExp.

/**
   * Tests that no DiskAccessException occurs for a synchronous persistent region with rolling
   * enabled.
   */
@Test
public void testSyncPersistRegionDAExp() {
    File testingDirectory1 = new File("testingDirectory1");
    testingDirectory1.mkdir();
    testingDirectory1.deleteOnExit();
    File file1 = new File("testingDirectory1/" + "testSyncPersistRegionDAExp" + "1");
    file1.mkdir();
    file1.deleteOnExit();
    dirs1 = new File[1];
    dirs1[0] = file1;
    diskDirSize1 = new int[1];
    diskDirSize1[0] = 2048;
    diskProps.setDiskDirsAndSizes(dirs1, diskDirSize1);
    diskProps.setPersistBackup(true);
    diskProps.setRolling(true);
    diskProps.setMaxOplogSize(100000000);
    diskProps.setRegionName("region_SyncPersistRegionDAExp");
    region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
    LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = true;
    DiskStore ds = cache.findDiskStore(((LocalRegion) region).getDiskStoreName());
    assertTrue(ds != null);
    int[] diskSizes1 = ds.getDiskDirSizes();
    // verify the store reports the single configured directory and its size cap
    assertEquals(1, diskSizes1.length);
    assertTrue("diskSizes != 2048 ", diskSizes1[0] == 2048);
    this.diskAccessExpHelpermethod(region);
    // region.close(); // closes disk file which will flush all buffers
    closeDown();
    LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
}
Also used : DiskStore(org.apache.geode.cache.DiskStore) File(java.io.File) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)
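
The test sizes its single directory through the internal DiskRegionProperties helper. With the public DiskStoreFactory, directories and their caps are paired explicitly, and in the public API the sizes are given in megabytes. The directory, cap, and store name below are illustrative.

import java.io.File;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.DiskStore;
import org.apache.geode.cache.DiskStoreFactory;

public class DiskDirSizesSketch {
    public static void main(String[] args) {
        Cache cache = new CacheFactory().set("mcast-port", "0").create();

        File dir = new File("testingDirectory1");
        dir.mkdirs();

        // Pair each disk directory with its maximum size (in megabytes).
        DiskStoreFactory dsf = cache.createDiskStoreFactory();
        dsf.setDiskDirsAndSizes(new File[] { dir }, new int[] { 2048 });
        DiskStore store = dsf.create("sizedStore");

        // The caps can be read back for verification, as the test does.
        int[] sizes = store.getDiskDirSizes();
        System.out.println("cap for dir 0 (MB): " + sizes[0]);
        cache.close();
    }
}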

Example 99 with DiskStore

Use of org.apache.geode.cache.DiskStore in project geode by apache.

From the class DiskRegOplogSwtchingAndRollerJUnitTest, method testDiskFullExcep.

/**
   * Tests that while rolling is set to true, DiskAccessException does not occur even when the
   * amount of data put exceeds the maximum directory sizes.
   */
@Test
public void testDiskFullExcep() {
    boolean exceptionOccurred = false;
    int[] diskDirSize1 = new int[] { 1048576, 1048576, 1048576, 1048576 };
    diskProps.setDiskDirsAndSizes(dirs, diskDirSize1);
    diskProps.setPersistBackup(true);
    diskProps.setRolling(true);
    diskProps.setMaxOplogSize(1000000000);
    diskProps.setBytesThreshold(1000000);
    diskProps.setTimeInterval(1500000);
    region = DiskRegionHelperFactory.getAsyncPersistOnlyRegion(cache, diskProps);
    DiskStore ds = cache.findDiskStore(((LocalRegion) region).getDiskStoreName());
    assertTrue(ds != null);
    int[] diskSizes1 = ds.getDiskDirSizes();
    assertTrue("diskSizes != 1048576 ", diskSizes1[0] == 1048576);
    assertTrue("diskSizes != 1048576 ", diskSizes1[1] == 1048576);
    assertTrue("diskSizes != 1048576 ", diskSizes1[2] == 1048576);
    assertTrue("diskSizes != 1048576 ", diskSizes1[3] == 1048576);
    final byte[] value = new byte[1024];
    Arrays.fill(value, (byte) 77);
    try {
        for (int i = 0; i < 7000; i++) {
            region.put("" + i, value);
        }
    } catch (DiskAccessException e) {
        logWriter.error("exception not expected", e);
        exceptionOccurred = true;
    }
    if (exceptionOccurred) {
        fail("FAILED::DiskAccessException is not expected here!");
    }
    // region.close(); // closes disk file which will flush all buffers
}
Also used : DiskStore(org.apache.geode.cache.DiskStore) DiskAccessException(org.apache.geode.cache.DiskAccessException) Test(org.junit.Test) IntegrationTest(org.apache.geode.test.junit.categories.IntegrationTest)
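
This test exercises the asynchronous persistence path (note setBytesThreshold and setTimeInterval on the properties helper). In the public API, the async queue is configured on the disk store and enabled per region by turning off synchronous disk writes. The sketch below is illustrative in its queue size, flush interval, and names.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.DiskStoreFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class AsyncPersistenceSketch {
    public static void main(String[] args) {
        Cache cache = new CacheFactory().set("mcast-port", "0").create();

        // The async queue flushes when it holds queue-size operations or when
        // time-interval milliseconds have elapsed, whichever comes first.
        DiskStoreFactory dsf = cache.createDiskStoreFactory();
        dsf.setQueueSize(1000);
        dsf.setTimeInterval(1500);
        dsf.create("asyncStore");

        // disk-synchronous=false routes this region's writes through the queue.
        Region<String, byte[]> region = cache
                .<String, byte[]>createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT)
                .setDiskStoreName("asyncStore")
                .setDiskSynchronous(false)
                .create("asyncRegion");

        byte[] value = new byte[1024];
        for (int i = 0; i < 100; i++) {
            region.put(String.valueOf(i), value);
        }
        cache.close(); // closing the cache flushes any queued writes
    }
}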

Example 100 with DiskStore

Use of org.apache.geode.cache.DiskStore in project geode by apache.

From the class Shipment, method createPersistentPrsAndServer.

public static int createPersistentPrsAndServer(int redundantCopies, int totalNoofBuckets) {
    PartitionedRegionSingleHopDUnitTest test = new PartitionedRegionSingleHopDUnitTest();
    cache = test.getCache();
    DiskStore disk = cache.findDiskStore("disk");
    if (disk == null) {
        disk = cache.createDiskStoreFactory().setDiskDirs(getDiskDirs()).create("disk");
    }
    PartitionAttributesFactory paf = new PartitionAttributesFactory();
    paf.setRedundantCopies(redundantCopies).setTotalNumBuckets(totalNoofBuckets);
    AttributesFactory attr = new AttributesFactory();
    attr.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
    attr.setDiskStoreName("disk");
    attr.setPartitionAttributes(paf.create());
    // attr.setConcurrencyChecksEnabled(true);
    region = cache.createRegion(PR_NAME, attr.create());
    assertNotNull(region);
    LogWriterUtils.getLogWriter().info("Partitioned Region " + PR_NAME + " created Successfully :" + region.toString());
    // creating colocated Regions
    paf = new PartitionAttributesFactory();
    paf.setRedundantCopies(redundantCopies).setTotalNumBuckets(totalNoofBuckets).setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
    attr = new AttributesFactory();
    attr.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
    attr.setDiskStoreName("disk");
    attr.setPartitionAttributes(paf.create());
    // attr.setConcurrencyChecksEnabled(true);
    customerRegion = cache.createRegion("CUSTOMER", attr.create());
    assertNotNull(customerRegion);
    LogWriterUtils.getLogWriter().info("Partitioned Region CUSTOMER created Successfully :" + customerRegion.toString());
    paf = new PartitionAttributesFactory();
    paf.setRedundantCopies(redundantCopies).setTotalNumBuckets(totalNoofBuckets).setColocatedWith("CUSTOMER").setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
    attr = new AttributesFactory();
    attr.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
    attr.setDiskStoreName("disk");
    attr.setPartitionAttributes(paf.create());
    // attr.setConcurrencyChecksEnabled(true);
    orderRegion = cache.createRegion("ORDER", attr.create());
    assertNotNull(orderRegion);
    LogWriterUtils.getLogWriter().info("Partitioned Region ORDER created Successfully :" + orderRegion.toString());
    paf = new PartitionAttributesFactory();
    paf.setRedundantCopies(redundantCopies).setTotalNumBuckets(totalNoofBuckets).setColocatedWith("ORDER").setPartitionResolver(new CustomerIDPartitionResolver("CustomerIDPartitionResolver"));
    attr = new AttributesFactory();
    attr.setDataPolicy(DataPolicy.PERSISTENT_PARTITION);
    attr.setDiskStoreName("disk");
    attr.setPartitionAttributes(paf.create());
    // attr.setConcurrencyChecksEnabled(true);
    shipmentRegion = cache.createRegion("SHIPMENT", attr.create());
    assertNotNull(shipmentRegion);
    LogWriterUtils.getLogWriter().info("Partitioned Region SHIPMENT created Successfully :" + shipmentRegion.toString());
    replicatedRegion = cache.createRegion("rr", new AttributesFactory().create());
    CacheServer server = cache.addCacheServer();
    int port = AvailablePort.getRandomAvailablePort(AvailablePort.SOCKET);
    server.setPort(port);
    server.setHostnameForClients("localhost");
    try {
        server.start();
    } catch (IOException e) {
        Assert.fail("Failed to start server ", e);
    }
    return port;
}
Also used : DiskStore(org.apache.geode.cache.DiskStore) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) AttributesFactory(org.apache.geode.cache.AttributesFactory) PartitionAttributesFactory(org.apache.geode.cache.PartitionAttributesFactory) CacheServer(org.apache.geode.cache.server.CacheServer) IOException(java.io.IOException)
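
The helper repeats one pattern four times: a persistent partitioned region on the shared "disk" store, each child colocated with its parent. Distilled to a single parent/child pair with the RegionFactory API, it looks roughly like the sketch below; bucket count, redundancy, and region names are illustrative, and the test's CustomerIDPartitionResolver, which routes related keys to the same buckets, is omitted for brevity.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.cache.PartitionAttributesFactory;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.RegionShortcut;

public class ColocatedPersistentPrSketch {
    public static void main(String[] args) {
        Cache cache = new CacheFactory().set("mcast-port", "0").create();
        cache.createDiskStoreFactory().create("disk");

        // Parent partitioned region, persisted through the "disk" store.
        PartitionAttributesFactory<String, Object> paf = new PartitionAttributesFactory<>();
        paf.setRedundantCopies(1).setTotalNumBuckets(10);
        Region<String, Object> customer = cache
                .<String, Object>createRegionFactory(RegionShortcut.PARTITION_PERSISTENT)
                .setDiskStoreName("disk")
                .setPartitionAttributes(paf.create())
                .create("CUSTOMER");

        // Child region: identical bucket count and redundancy, colocated with
        // the parent so related entries live on the same members.
        PartitionAttributesFactory<String, Object> childPaf = new PartitionAttributesFactory<>();
        childPaf.setRedundantCopies(1).setTotalNumBuckets(10).setColocatedWith("CUSTOMER");
        Region<String, Object> order = cache
                .<String, Object>createRegionFactory(RegionShortcut.PARTITION_PERSISTENT)
                .setDiskStoreName("disk")
                .setPartitionAttributes(childPaf.create())
                .create("ORDER");

        System.out.println("created " + customer.getFullPath() + " and " + order.getFullPath());
        cache.close();
    }
}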

Aggregations

DiskStore (org.apache.geode.cache.DiskStore): 190
Test (org.junit.Test): 120
AttributesFactory (org.apache.geode.cache.AttributesFactory): 91
DiskStoreFactory (org.apache.geode.cache.DiskStoreFactory): 91
File (java.io.File): 79
Region (org.apache.geode.cache.Region): 71
Cache (org.apache.geode.cache.Cache): 61
DistributedTest (org.apache.geode.test.junit.categories.DistributedTest): 54
PartitionAttributesFactory (org.apache.geode.cache.PartitionAttributesFactory): 46
SerializableRunnable (org.apache.geode.test.dunit.SerializableRunnable): 44
IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest): 39
LocalRegion (org.apache.geode.internal.cache.LocalRegion): 32
FlakyTest (org.apache.geode.test.junit.categories.FlakyTest): 31
VM (org.apache.geode.test.dunit.VM): 28
DiskRegion (org.apache.geode.internal.cache.DiskRegion): 24
Host (org.apache.geode.test.dunit.Host): 23
Expectations (org.jmock.Expectations): 23
InternalCache (org.apache.geode.internal.cache.InternalCache): 21
UnitTest (org.apache.geode.test.junit.categories.UnitTest): 21
IOException (java.io.IOException): 20