Example 1 with DiskRegionView

Use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.

From the class DiskInitFileJUnitTest, method testKrfIds:

@Test
public void testKrfIds() {
    // create a mock statistics factory for creating directory holders
    final StatisticsFactory sf = context.mock(StatisticsFactory.class);
    context.checking(new Expectations() {

        {
            ignoring(sf);
        }
    });
    // Add a mock region to the init file so it doesn't
    // delete the file when the init file is closed
    final DiskRegionView drv = context.mock(DiskRegionView.class);
    context.checking(new Expectations() {

        {
            ignoring(drv);
        }
    });
    // Create a mock disk store impl. All we need to do is return
    // this init file directory.
    final DiskStoreImpl parent = context.mock(DiskStoreImpl.class);
    context.checking(new Expectations() {

        {
            allowing(parent).getInfoFileDir();
            will(returnValue(new DirectoryHolder(sf, testDirectory, 0, 0)));
            ignoring(parent);
        }
    });
    DiskInitFile dif = new DiskInitFile("testKrfIds", parent, false, Collections.<File>emptySet());
    assertEquals(false, dif.hasKrf(1));
    dif.cmnKrfCreate(1);
    assertEquals(true, dif.hasKrf(1));
    assertEquals(false, dif.hasKrf(2));
    dif.cmnKrfCreate(2);
    assertEquals(true, dif.hasKrf(2));
    dif.createRegion(drv);
    dif.forceCompaction();
    dif.close();
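    // Reopen the init file and verify the krf ids were recovered from disk.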
    dif = new DiskInitFile("testKrfIds", parent, true, Collections.<File>emptySet());
    assertEquals(true, dif.hasKrf(1));
    assertEquals(true, dif.hasKrf(2));
    dif.cmnCrfDelete(1);
    assertEquals(false, dif.hasKrf(1));
    assertEquals(true, dif.hasKrf(2));
    dif.cmnCrfDelete(2);
    assertEquals(false, dif.hasKrf(2));
    dif.createRegion(drv);
    dif.forceCompaction();
    dif.close();
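    // Reopen once more; the crf deletes should have removed both krf ids.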
    dif = new DiskInitFile("testKrfIds", parent, true, Collections.<File>emptySet());
    assertEquals(false, dif.hasKrf(1));
    assertEquals(false, dif.hasKrf(2));
    dif.destroy();
}
Also used: Expectations (org.jmock.Expectations), StatisticsFactory (org.apache.geode.StatisticsFactory), File (java.io.File), DiskRegionView (org.apache.geode.internal.cache.persistence.DiskRegionView), Test (org.junit.Test), IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest)
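
The stubbing above relies on two jMock expectation builders: allowing(...).will(returnValue(...)) pins down one specific call, while ignoring(...) silences every other interaction with the mock. A minimal, self-contained sketch of the same pattern, using a hypothetical Dependency interface in place of the Geode types:

import org.jmock.Expectations;
import org.jmock.Mockery;

public class JMockStubbingSketch {

    // Hypothetical collaborator, standing in for StatisticsFactory/DiskStoreImpl.
    interface Dependency {
        String infoDir();
        void log(String message);
    }

    public static void main(String[] args) {
        Mockery context = new Mockery();
        final Dependency dep = context.mock(Dependency.class);
        context.checking(new Expectations() {
            {
                // Stub one specific call with a canned return value...
                allowing(dep).infoDir();
                will(returnValue("/tmp/info"));
                // ...and ignore every other interaction with this mock.
                ignoring(dep);
            }
        });
        System.out.println(dep.infoDir()); // prints /tmp/info
        dep.log("this call is simply ignored");
    }
}

Because the allowing expectation is declared before ignoring, the specific stub takes precedence, which is why the test can stub getInfoFileDir() and still ignore the rest of DiskStoreImpl.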

Example 2 with DiskRegionView

Use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.

From the class DiskStoreImpl, method offlineCompact:

private void offlineCompact() {
    assert isOfflineCompacting();
    this.RECOVER_VALUES = false;
    this.deadRecordCount = 0;
    for (DiskRegionView drv : getKnown()) {
        scheduleForRecovery(OfflineCompactionDiskRegion.create(this, drv));
    }
    persistentOplogs.recoverRegionsThatAreReady();
    persistentOplogs.offlineCompact();
    // TODO soplogs - we need to do offline compaction for
    // the soplog regions, but that is not currently implemented
    getDiskInitFile().forceCompaction();
    if (this.upgradeVersionOnly) {
        System.out.println("Upgrade disk store " + this.name + " to version " + getRecoveredGFVersionName() + " finished.");
    } else {
        if (getDeadRecordCount() == 0) {
            System.out.println("Offline compaction did not find anything to compact.");
        } else {
            System.out.println("Offline compaction removed " + getDeadRecordCount() + " records.");
        }
    // If we have more than one oplog then the liveEntryCount may not be the total
    // number of live entries in the disk store, so do not log the live entry count.
    }
}
Also used: DiskRegionView (org.apache.geode.internal.cache.persistence.DiskRegionView)
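
The loop calls scheduleForRecovery for every known region before a single recoverRegionsThatAreReady() pass, presumably so recovery can make one sweep over the oplogs rather than one sweep per region. A stripped-down sketch of that schedule-then-recover shape, with a hypothetical RecoverableRegion standing in for DiskRegionView:

import java.util.ArrayList;
import java.util.List;

public class ScheduleThenRecoverSketch {

    // Hypothetical stand-in for DiskRegionView / OfflineCompactionDiskRegion.
    interface RecoverableRegion {
        void recover();
    }

    private final List<RecoverableRegion> pending = new ArrayList<>();

    // Queue a region; nothing is read from disk yet.
    void scheduleForRecovery(RecoverableRegion region) {
        pending.add(region);
    }

    // Recover everything that was queued, in one batch.
    void recoverRegionsThatAreReady() {
        for (RecoverableRegion region : pending) {
            region.recover();
        }
        pending.clear();
    }
}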

Example 3 with DiskRegionView

Use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.

From the class DiskStoreImpl, method getPdxTypes:

private Collection<PdxType> getPdxTypes() throws IOException {
    // Since we are recovering a disk store, the casts from DiskRegionView to
    // PlaceHolderDiskRegion and from RegionEntry to DiskEntry should be ok.
    // In offline mode, we need to schedule the regions to be recovered explicitly.
    DiskRegionView foundPdx = null;
    for (DiskRegionView drv : getKnown()) {
        if (drv.getName().equals(PeerTypeRegistration.REGION_FULL_PATH)) {
            foundPdx = drv;
            scheduleForRecovery((PlaceHolderDiskRegion) drv);
        }
    }
    if (foundPdx == null) {
        return Collections.emptyList();
    // throw new IllegalStateException("The disk store does not contain any PDX types.");
    }
    recoverRegionsThatAreReady();
    ArrayList<PdxType> result = new ArrayList<PdxType>();
    for (RegionEntry re : foundPdx.getRecoveredEntryMap().regionEntries()) {
        Object value = re._getValueRetain(foundPdx, true);
        if (Token.isRemoved(value)) {
            continue;
        }
        if (value instanceof CachedDeserializable) {
            value = ((CachedDeserializable) value).getDeserializedForReading();
        }
        if (value instanceof PdxType) {
            PdxType type = (PdxType) value;
            result.add(type);
        }
    }
    Collections.sort(result, new Comparator<PdxType>() {

        @Override
        public int compare(PdxType o1, PdxType o2) {
            return o1.getClassName().compareTo(o2.getClassName());
        }
    });
    return result;
}
Also used: PdxType (org.apache.geode.pdx.internal.PdxType), ArrayList (java.util.ArrayList), DiskRegionView (org.apache.geode.internal.cache.persistence.DiskRegionView)
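
The anonymous Comparator at the end only orders the recovered types by class name. On Java 8 or later the same sort can be written as a one-liner; a drop-in equivalent using nothing beyond java.util.Comparator:

import java.util.Comparator;

// Equivalent to the anonymous Comparator above: sort PdxTypes by class name.
result.sort(Comparator.comparing(PdxType::getClassName));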

Example 4 with DiskRegionView

Use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.

From the class DiskStoreImpl, method getPdxTypesAndEnums:

private Collection<Object> getPdxTypesAndEnums() throws IOException {
    // Since we are recovering a disk store, the casts from DiskRegionView to
    // PlaceHolderDiskRegion and from RegionEntry to DiskEntry should be ok.
    // In offline mode, we need to schedule the regions to be recovered explicitly.
    DiskRegionView foundPdx = null;
    for (DiskRegionView drv : getKnown()) {
        if (drv.getName().equals(PeerTypeRegistration.REGION_FULL_PATH)) {
            foundPdx = drv;
            scheduleForRecovery((PlaceHolderDiskRegion) drv);
        }
    }
    if (foundPdx == null) {
        return Collections.emptyList();
    // throw new IllegalStateException("The disk store does not contain any PDX types.");
    }
    recoverRegionsThatAreReady();
    ArrayList<Object> result = new ArrayList<Object>();
    for (RegionEntry re : foundPdx.getRecoveredEntryMap().regionEntries()) {
        Object value = re._getValueRetain(foundPdx, true);
        if (Token.isRemoved(value)) {
            continue;
        }
        if (value instanceof CachedDeserializable) {
            value = ((CachedDeserializable) value).getDeserializedForReading();
        }
        result.add(value);
    }
    return result;
}
Also used: ArrayList (java.util.ArrayList), DiskRegionView (org.apache.geode.internal.cache.persistence.DiskRegionView)
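
Unlike getPdxTypes, this variant returns a Collection<Object>, because the PDX registry region stores enum metadata alongside PdxType instances (note EnumInfo in the aggregations below). A caller that needs the two kinds separated could filter with instanceof; a minimal sketch, assuming EnumInfo is the stored enum representation:

import java.util.Collection;
import java.util.List;

import org.apache.geode.pdx.internal.EnumInfo;
import org.apache.geode.pdx.internal.PdxType;

public class PdxSplitSketch {

    // Split the mixed collection returned by getPdxTypesAndEnums() by runtime type.
    static void split(Collection<Object> mixed, List<PdxType> types, List<EnumInfo> enums) {
        for (Object value : mixed) {
            if (value instanceof PdxType) {
                types.add((PdxType) value);
            } else if (value instanceof EnumInfo) {
                enums.add((EnumInfo) value);
            }
        }
    }
}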

Example 5 with DiskRegionView

Use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.

From the class DiskStoreImpl, method exportSnapshot:

private void exportSnapshot(String name, File out) throws IOException {
    // Since we are recovering a disk store, the casts from DiskRegionView to
    // PlaceHolderDiskRegion and from RegionEntry to DiskEntry should be ok.
    // Coalesce disk regions so that partitioned buckets from a member end up in
    // the same file.
    Map<String, SnapshotWriter> regions = new HashMap<String, SnapshotWriter>();
    try {
        for (DiskRegionView drv : getKnown()) {
            PlaceHolderDiskRegion ph = (PlaceHolderDiskRegion) drv;
            String regionName = (drv.isBucket() ? ph.getPrName() : drv.getName());
            SnapshotWriter writer = regions.get(regionName);
            if (writer == null) {
                String fname = regionName.substring(1).replace('/', '-');
                File f = new File(out, "snapshot-" + name + "-" + fname + ".gfd");
                writer = GFSnapshot.create(f, regionName);
                regions.put(regionName, writer);
            }
            // Add a mapping from the bucket name to the writer for the PR
            // if this is a bucket.
            regions.put(drv.getName(), writer);
        }
        // In offline mode, we need to schedule the regions to be recovered explicitly.
        for (DiskRegionView drv : getKnown()) {
            final SnapshotWriter writer = regions.get(drv.getName());
            scheduleForRecovery(new ExportDiskRegion(this, drv, new ExportWriter() {

                @Override
                public void writeBatch(Map<Object, RecoveredEntry> entries) throws IOException {
                    for (Map.Entry<Object, RecoveredEntry> re : entries.entrySet()) {
                        Object key = re.getKey();
                        // TODO:KIRK:OK Rusty's code was value = de.getValueWithContext(drv);
                        Object value = re.getValue().getValue();
                        writer.snapshotEntry(new SnapshotRecord(key, value));
                    }
                }
            }));
        }
        recoverRegionsThatAreReady();
    } finally {
        // Some writers are in the map multiple times because of multiple buckets;
        // get the unique set of writers and close each writer once.
        Set<SnapshotWriter> uniqueWriters = new HashSet<>(regions.values());
        for (SnapshotWriter writer : uniqueWriters) {
            writer.snapshotComplete();
        }
    }
}
Also used: ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap), HashMap (java.util.HashMap), SnapshotRecord (org.apache.geode.internal.cache.snapshot.SnapshotPacket.SnapshotRecord), ExportWriter (org.apache.geode.internal.cache.ExportDiskRegion.ExportWriter), DiskRegionView (org.apache.geode.internal.cache.persistence.DiskRegionView), SnapshotWriter (org.apache.geode.internal.cache.snapshot.GFSnapshot.SnapshotWriter), File (java.io.File), Map (java.util.Map), ConcurrentMap (java.util.concurrent.ConcurrentMap), RecoveredEntry (org.apache.geode.internal.cache.DiskEntry.RecoveredEntry), ConcurrentHashSet (org.apache.geode.internal.concurrent.ConcurrentHashSet), IntOpenHashSet (it.unimi.dsi.fastutil.ints.IntOpenHashSet), HashSet (java.util.HashSet), LongOpenHashSet (it.unimi.dsi.fastutil.longs.LongOpenHashSet)
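
The finally block closes each writer exactly once even though the map holds the same writer under several keys (the partitioned region name plus each of its bucket names). The same share-then-close-once pattern in isolation, with a hypothetical Writer interface and made-up region names standing in for SnapshotWriter and real bucket paths:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

public class SharedWriterSketch {

    // Hypothetical stand-in for GFSnapshot.SnapshotWriter.
    interface Writer {
        void close();
    }

    public static void main(String[] args) {
        Map<String, Writer> byName = new HashMap<>();
        Writer shared = () -> System.out.println("closed once");
        // The parent region and its buckets all map to the same writer instance.
        byName.put("/pr", shared);
        byName.put("/pr-bucket-0", shared);
        byName.put("/pr-bucket-1", shared);
        // Dedupe through a Set (identity-based here) so close runs once per writer.
        Set<Writer> unique = new HashSet<>(byName.values());
        for (Writer writer : unique) {
            writer.close();
        }
    }
}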

Aggregations

DiskRegionView (org.apache.geode.internal.cache.persistence.DiskRegionView): 27 uses
ArrayList (java.util.ArrayList): 5 uses
ConcurrentHashMap (java.util.concurrent.ConcurrentHashMap): 4 uses
File (java.io.File): 3 uses
HashMap (java.util.HashMap): 3 uses
StatisticsFactory (org.apache.geode.StatisticsFactory): 3 uses
DiskRecoveryStore (org.apache.geode.internal.cache.persistence.DiskRecoveryStore): 3 uses
PdxType (org.apache.geode.pdx.internal.PdxType): 3 uses
Int2ObjectOpenHashMap (it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap): 2 uses
VersionTag (org.apache.geode.internal.cache.versions.VersionTag): 2 uses
StoredObject (org.apache.geode.internal.offheap.StoredObject): 2 uses
EnumInfo (org.apache.geode.pdx.internal.EnumInfo): 2 uses
IntegrationTest (org.apache.geode.test.junit.categories.IntegrationTest): 2 uses
Expectations (org.jmock.Expectations): 2 uses
Test (org.junit.Test): 2 uses
IntOpenHashSet (it.unimi.dsi.fastutil.ints.IntOpenHashSet): 1 use
Long2ObjectOpenHashMap (it.unimi.dsi.fastutil.longs.Long2ObjectOpenHashMap): 1 use
LongOpenHashSet (it.unimi.dsi.fastutil.longs.LongOpenHashSet): 1 use
IOException (java.io.IOException): 1 use
HashSet (java.util.HashSet): 1 use