Use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.
The class DiskInitFileJUnitTest, method testKrfIds.
@Test
public void testKrfIds() {
  // create a mock statistics factory for creating directory holders
  final StatisticsFactory sf = context.mock(StatisticsFactory.class);
  context.checking(new Expectations() {
    {
      ignoring(sf);
    }
  });
  // Add a mock region to the init file so it doesn't delete the file
  // when the init file is closed
  final DiskRegionView drv = context.mock(DiskRegionView.class);
  context.checking(new Expectations() {
    {
      ignoring(drv);
    }
  });
  // Create a mock disk store impl. All we need to do is return
  // this init file directory.
  final DiskStoreImpl parent = context.mock(DiskStoreImpl.class);
  context.checking(new Expectations() {
    {
      allowing(parent).getInfoFileDir();
      will(returnValue(new DirectoryHolder(sf, testDirectory, 0, 0)));
      ignoring(parent);
    }
  });

  DiskInitFile dif = new DiskInitFile("testKrfIds", parent, false, Collections.<File>emptySet());
  assertEquals(false, dif.hasKrf(1));
  dif.cmnKrfCreate(1);
  assertEquals(true, dif.hasKrf(1));
  assertEquals(false, dif.hasKrf(2));
  dif.cmnKrfCreate(2);
  assertEquals(true, dif.hasKrf(2));
  dif.createRegion(drv);
  dif.forceCompaction();
  dif.close();

  dif = new DiskInitFile("testKrfIds", parent, true, Collections.<File>emptySet());
  assertEquals(true, dif.hasKrf(1));
  assertEquals(true, dif.hasKrf(2));
  dif.cmnCrfDelete(1);
  assertEquals(false, dif.hasKrf(1));
  assertEquals(true, dif.hasKrf(2));
  dif.cmnCrfDelete(2);
  assertEquals(false, dif.hasKrf(2));
  dif.createRegion(drv);
  dif.forceCompaction();
  dif.close();

  dif = new DiskInitFile("testKrfIds", parent, true, Collections.<File>emptySet());
  assertEquals(false, dif.hasKrf(1));
  assertEquals(false, dif.hasKrf(2));
  dif.destroy();
}
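The test relies on scaffolding that is not shown in the snippet: a JMock Mockery field named context and a testDirectory file. The following is a minimal sketch of that scaffolding, assuming JUnit 4 with a TemporaryFolder rule; the rule, folder name, and setUp body are illustrative assumptions, not taken from the Geode test.

import java.io.File;
import org.jmock.Mockery;
import org.jmock.lib.legacy.ClassImposteriser;
import org.junit.Before;
import org.junit.Rule;
import org.junit.rules.TemporaryFolder;

public class DiskInitFileJUnitTest {

  // JMock context used by the test; the class imposteriser lets it mock
  // concrete classes such as DiskStoreImpl, not just interfaces.
  private final Mockery context = new Mockery() {
    {
      setImposteriser(ClassImposteriser.INSTANCE);
    }
  };

  // Illustrative: directory handed to DirectoryHolder/DiskInitFile in the test.
  @Rule
  public TemporaryFolder tempDir = new TemporaryFolder();

  private File testDirectory;

  @Before
  public void setUp() throws Exception {
    testDirectory = tempDir.newFolder("disk-init-file-test");
  }
}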
Use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.
The class DiskStoreImpl, method offlineCompact.
private void offlineCompact() {
  assert isOfflineCompacting();
  this.RECOVER_VALUES = false;
  this.deadRecordCount = 0;
  for (DiskRegionView drv : getKnown()) {
    scheduleForRecovery(OfflineCompactionDiskRegion.create(this, drv));
  }
  persistentOplogs.recoverRegionsThatAreReady();
  persistentOplogs.offlineCompact();
  // TODO soplogs - we need to do offline compaction for the soplog regions,
  // but that is not currently implemented
  getDiskInitFile().forceCompaction();
  if (this.upgradeVersionOnly) {
    System.out.println(
        "Upgrade disk store " + this.name + " to version " + getRecoveredGFVersionName() + " finished.");
  } else {
    if (getDeadRecordCount() == 0) {
      System.out.println("Offline compaction did not find anything to compact.");
    } else {
      System.out.println("Offline compaction removed " + getDeadRecordCount() + " records.");
    }
    // If we have more than one oplog then the liveEntryCount may not be the
    // total number of live entries in the disk store. So do not log the live
    // entry count.
  }
}
Use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.
The class DiskStoreImpl, method getPdxTypes.
private Collection<PdxType> getPdxTypes() throws IOException {
  // Since we are recovering a disk store, the cast from DiskRegionView -->
  // PlaceHolderDiskRegion and from RegionEntry --> DiskEntry should be ok.

  // In offline mode, we need to schedule the regions to be recovered explicitly.
  DiskRegionView foundPdx = null;
  for (DiskRegionView drv : getKnown()) {
    if (drv.getName().equals(PeerTypeRegistration.REGION_FULL_PATH)) {
      foundPdx = drv;
      scheduleForRecovery((PlaceHolderDiskRegion) drv);
    }
  }
  if (foundPdx == null) {
    return Collections.emptyList();
    // throw new IllegalStateException("The disk store does not contain any PDX types.");
  }
  recoverRegionsThatAreReady();
  ArrayList<PdxType> result = new ArrayList<PdxType>();
  for (RegionEntry re : foundPdx.getRecoveredEntryMap().regionEntries()) {
    Object value = re._getValueRetain(foundPdx, true);
    if (Token.isRemoved(value)) {
      continue;
    }
    if (value instanceof CachedDeserializable) {
      value = ((CachedDeserializable) value).getDeserializedForReading();
    }
    if (value instanceof PdxType) {
      PdxType type = (PdxType) value;
      result.add(type);
    }
  }
  Collections.sort(result, new Comparator<PdxType>() {
    @Override
    public int compare(PdxType o1, PdxType o2) {
      return o1.getClassName().compareTo(o2.getClassName());
    }
  });
  return result;
}
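For reference, on Java 8 or later the anonymous Comparator at the end of the method can be written as a method reference. This is a stylistic equivalent, not part of the Geode source:

// Same ordering by class name, using java.util.Comparator.comparing:
result.sort(Comparator.comparing(PdxType::getClassName));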
Use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.
The class DiskStoreImpl, method getPdxTypesAndEnums.
private Collection<Object> getPdxTypesAndEnums() throws IOException {
  // Since we are recovering a disk store, the cast from DiskRegionView -->
  // PlaceHolderDiskRegion and from RegionEntry --> DiskEntry should be ok.

  // In offline mode, we need to schedule the regions to be recovered explicitly.
  DiskRegionView foundPdx = null;
  for (DiskRegionView drv : getKnown()) {
    if (drv.getName().equals(PeerTypeRegistration.REGION_FULL_PATH)) {
      foundPdx = drv;
      scheduleForRecovery((PlaceHolderDiskRegion) drv);
    }
  }
  if (foundPdx == null) {
    return Collections.emptyList();
    // throw new IllegalStateException("The disk store does not contain any PDX types.");
  }
  recoverRegionsThatAreReady();
  ArrayList<Object> result = new ArrayList<Object>();
  for (RegionEntry re : foundPdx.getRecoveredEntryMap().regionEntries()) {
    Object value = re._getValueRetain(foundPdx, true);
    if (Token.isRemoved(value)) {
      continue;
    }
    if (value instanceof CachedDeserializable) {
      value = ((CachedDeserializable) value).getDeserializedForReading();
    }
    result.add(value);
  }
  return result;
}
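Unlike getPdxTypes(), this variant keeps every recovered value from the PDX registry region, so callers see both PdxType and EnumInfo objects. A hedged sketch of how calling code inside DiskStoreImpl might separate the two (variable names are illustrative; EnumInfo is org.apache.geode.pdx.internal.EnumInfo):

// Assumes java.util.ArrayList, java.util.Collection, java.util.List and the
// PdxType/EnumInfo imports are available.
Collection<Object> typesAndEnums = getPdxTypesAndEnums();
List<PdxType> types = new ArrayList<PdxType>();
List<EnumInfo> enums = new ArrayList<EnumInfo>();
for (Object o : typesAndEnums) {
  if (o instanceof PdxType) {
    types.add((PdxType) o);
  } else if (o instanceof EnumInfo) {
    enums.add((EnumInfo) o);
  }
}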
Use of org.apache.geode.internal.cache.persistence.DiskRegionView in project geode by apache.
The class DiskStoreImpl, method exportSnapshot.
private void exportSnapshot(String name, File out) throws IOException {
  // Since we are recovering a disk store, the cast from DiskRegionView -->
  // PlaceHolderDiskRegion and from RegionEntry --> DiskEntry should be ok.

  // Coalesce disk regions so that partitioned buckets from a member end up in
  // the same file.
  Map<String, SnapshotWriter> regions = new HashMap<String, SnapshotWriter>();
  try {
    for (DiskRegionView drv : getKnown()) {
      PlaceHolderDiskRegion ph = (PlaceHolderDiskRegion) drv;
      String regionName = (drv.isBucket() ? ph.getPrName() : drv.getName());
      SnapshotWriter writer = regions.get(regionName);
      if (writer == null) {
        String fname = regionName.substring(1).replace('/', '-');
        File f = new File(out, "snapshot-" + name + "-" + fname + ".gfd");
        writer = GFSnapshot.create(f, regionName);
        regions.put(regionName, writer);
      }
      // Add a mapping from the bucket name to the writer for the PR
      // if this is a bucket.
      regions.put(drv.getName(), writer);
    }

    // In offline mode, we need to schedule the regions to be recovered explicitly.
    for (DiskRegionView drv : getKnown()) {
      final SnapshotWriter writer = regions.get(drv.getName());
      scheduleForRecovery(new ExportDiskRegion(this, drv, new ExportWriter() {
        @Override
        public void writeBatch(Map<Object, RecoveredEntry> entries) throws IOException {
          for (Map.Entry<Object, RecoveredEntry> re : entries.entrySet()) {
            Object key = re.getKey();
            // TODO:KIRK:OK Rusty's code was value = de.getValueWithContext(drv);
            Object value = re.getValue().getValue();
            writer.snapshotEntry(new SnapshotRecord(key, value));
          }
        }
      }));
    }
    recoverRegionsThatAreReady();
  } finally {
    // Some writers are in the map multiple times because of multiple buckets;
    // get the unique set of writers and close each writer once.
    Set<SnapshotWriter> uniqueWriters = new HashSet<SnapshotWriter>(regions.values());
    for (SnapshotWriter writer : uniqueWriters) {
      writer.snapshotComplete();
    }
  }
}
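The .gfd files produced here use the GFSnapshot format, which the public snapshot API can generally read back. A minimal sketch of iterating one exported file with org.apache.geode.cache.snapshot.SnapshotReader; the class name and file path below are illustrative assumptions:

import java.io.File;
import java.util.Map;
import org.apache.geode.cache.snapshot.SnapshotIterator;
import org.apache.geode.cache.snapshot.SnapshotReader;

public class ReadExportedSnapshot {
  public static void main(String[] args) throws Exception {
    // Path to a file written by exportSnapshot(), e.g. snapshot-<name>-<region>.gfd
    File exported = new File(args[0]);
    SnapshotIterator<Object, Object> iter = SnapshotReader.read(exported);
    try {
      while (iter.hasNext()) {
        Map.Entry<Object, Object> entry = iter.next();
        System.out.println(entry.getKey() + " -> " + entry.getValue());
      }
    } finally {
      iter.close();
    }
  }
}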