Use of org.apache.geode.internal.cache.ExportDiskRegion.ExportWriter in project geode by apache:
the method exportSnapshot of class DiskStoreImpl.
/**
 * Exports every region persisted in this disk store to snapshot (.gfd) files under
 * {@code out}. Buckets of the same partitioned region are coalesced into a single file.
 *
 * @param name label embedded in each snapshot file name ("snapshot-&lt;name&gt;-&lt;region&gt;.gfd")
 * @param out directory in which the snapshot files are created
 * @throws IOException if creating a snapshot file or writing/closing a writer fails
 */
private void exportSnapshot(String name, File out) throws IOException {
  // Since we are recovering a disk store, the cast from DiskRegionView -->
  // PlaceHolderDiskRegion and from RegionEntry --> DiskEntry should be ok.

  // Coalesce disk regions so that partitioned buckets from a member end up in
  // the same file.
  Map<String, SnapshotWriter> regions = new HashMap<>();
  try {
    for (DiskRegionView drv : getKnown()) {
      PlaceHolderDiskRegion ph = (PlaceHolderDiskRegion) drv;
      // Buckets are keyed by their parent PR name so they share one writer.
      String regionName = (drv.isBucket() ? ph.getPrName() : drv.getName());
      SnapshotWriter writer = regions.get(regionName);
      if (writer == null) {
        // Region names start with '/'; drop it and flatten the path for the file name.
        String fname = regionName.substring(1).replace('/', '-');
        File f = new File(out, "snapshot-" + name + "-" + fname + ".gfd");
        writer = GFSnapshot.create(f, regionName);
        regions.put(regionName, writer);
      }
      // Add a mapping from the bucket name to the writer for the PR
      // if this is a bucket.
      regions.put(drv.getName(), writer);
    }

    // Schedule each disk region for recovery explicitly; as entries are
    // recovered they are streamed into the region's snapshot writer.
    for (DiskRegionView drv : getKnown()) {
      final SnapshotWriter writer = regions.get(drv.getName());
      scheduleForRecovery(new ExportDiskRegion(this, drv, new ExportWriter() {
        @Override
        public void writeBatch(Map<Object, RecoveredEntry> entries) throws IOException {
          for (Map.Entry<Object, RecoveredEntry> re : entries.entrySet()) {
            Object key = re.getKey();
            // TODO:KIRK:OK Rusty's code was value = de.getValueWithContext(drv);
            Object value = re.getValue().getValue();
            writer.snapshotEntry(new SnapshotRecord(key, value));
          }
        }
      }));
    }
    recoverRegionsThatAreReady();
  } finally {
    // Some writers are in the map multiple times because of multiple buckets;
    // get the unique set of writers and close each writer once. Keep closing
    // the remaining writers even if one fails, then surface the first failure
    // (the original code stopped at the first failure and leaked the rest).
    IOException firstFailure = null;
    Set<SnapshotWriter> uniqueWriters = new HashSet<>(regions.values());
    for (SnapshotWriter writer : uniqueWriters) {
      try {
        writer.snapshotComplete();
      } catch (IOException e) {
        if (firstFailure == null) {
          firstFailure = e;
        }
      }
    }
    if (firstFailure != null) {
      throw firstFailure;
    }
  }
}
Aggregations