use of org.apache.geode.internal.cache.snapshot.SnapshotPacket.SnapshotRecord in project geode by apache.
the class DiskStoreImpl method exportSnapshot.
private void exportSnapshot(String name, File out) throws IOException {
  // Since we are recovering a disk store, the casts from DiskRegionView -->
  // PlaceHolderDiskRegion and from RegionEntry --> DiskEntry should be ok.

  // Coalesce disk regions so that partitioned buckets from a member end up in
  // the same file.
  Map<String, SnapshotWriter> regions = new HashMap<String, SnapshotWriter>();
  try {
    for (DiskRegionView drv : getKnown()) {
      PlaceHolderDiskRegion ph = (PlaceHolderDiskRegion) drv;
      String regionName = (drv.isBucket() ? ph.getPrName() : drv.getName());
      SnapshotWriter writer = regions.get(regionName);
      if (writer == null) {
        String fname = regionName.substring(1).replace('/', '-');
        File f = new File(out, "snapshot-" + name + "-" + fname + ".gfd");
        writer = GFSnapshot.create(f, regionName);
        regions.put(regionName, writer);
      }
      // Add a mapping from the bucket name to the writer for the PR
      // if this is a bucket.
      regions.put(drv.getName(), writer);
    }

    // In offline mode, we need to schedule the regions to be recovered
    // explicitly.
    for (DiskRegionView drv : getKnown()) {
      final SnapshotWriter writer = regions.get(drv.getName());
      scheduleForRecovery(new ExportDiskRegion(this, drv, new ExportWriter() {
        @Override
        public void writeBatch(Map<Object, RecoveredEntry> entries) throws IOException {
          for (Map.Entry<Object, RecoveredEntry> re : entries.entrySet()) {
            Object key = re.getKey();
            // TODO:KIRK:OK Rusty's code was value = de.getValueWithContext(drv);
            Object value = re.getValue().getValue();
            writer.snapshotEntry(new SnapshotRecord(key, value));
          }
        }
      }));
    }
    recoverRegionsThatAreReady();
  } finally {
    // Some writers are in the map multiple times because of multiple buckets.
    // Get the unique set of writers and close each writer exactly once.
    Set<SnapshotWriter> uniqueWriters = new HashSet<SnapshotWriter>(regions.values());
    for (SnapshotWriter writer : uniqueWriters) {
      writer.snapshotComplete();
    }
  }
}
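The writer lifecycle shown above can also be driven directly: GFSnapshot.create opens a .gfd file for a region, snapshotEntry appends records, and snapshotComplete flushes and closes the file. A minimal standalone sketch, assuming SnapshotWriter is the nested interface on GFSnapshot and using illustrative file/region names and String entries:

import java.io.File;
import org.apache.geode.internal.cache.snapshot.GFSnapshot;
import org.apache.geode.internal.cache.snapshot.GFSnapshot.SnapshotWriter;
import org.apache.geode.internal.cache.snapshot.SnapshotPacket.SnapshotRecord;

public class WriterSketch {
  public static void main(String[] args) throws Exception {
    File f = new File("snapshot-demo-example.gfd"); // illustrative file name
    SnapshotWriter writer = GFSnapshot.create(f, "/example"); // illustrative region name
    try {
      // Append a couple of entries; SnapshotRecord serializes the key and value.
      writer.snapshotEntry(new SnapshotRecord("key1", "value1"));
      writer.snapshotEntry(new SnapshotRecord("key2", "value2"));
    } finally {
      // Close exactly once, mirroring the uniqueWriters handling above.
      writer.snapshotComplete();
    }
  }
}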
use of org.apache.geode.internal.cache.snapshot.SnapshotPacket.SnapshotRecord in project geode by apache.
the class GFSnapshot method main.
public static void main(String[] args) throws Exception {
  if (args.length != 1) {
    System.out.println("Usage: GFSnapshot <file>");
    System.exit(1);
  }
  GFSnapshotImporter imp = new GFSnapshotImporter(new File(args[0]));
  try {
    System.out.println("Snapshot format is version " + imp.getVersion());
    System.out.println("Snapshot region is " + imp.getRegionName());

    ExportedRegistry reg = imp.getPdxTypes();
    Map<Integer, PdxType> types = reg.types();
    System.out.println("Found " + types.size() + " PDX types:");
    for (Entry<Integer, PdxType> entry : types.entrySet()) {
      System.out.println("\t" + entry.getKey() + " = " + entry.getValue());
    }

    Map<Integer, EnumInfo> enums = reg.enums();
    System.out.println("Found " + enums.size() + " PDX enums: ");
    for (Entry<Integer, EnumInfo> entry : enums.entrySet()) {
      System.out.println("\t" + entry.getKey() + " = " + entry.getValue());
    }
    System.out.println();

    SnapshotRecord record;
    while ((record = imp.readSnapshotRecord()) != null) {
      System.out.println(record.getKeyObject() + " = " + record.getValueObject());
    }
  } finally {
    imp.close();
  }
}
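Taken together, this main method is a small diagnostic tool for .gfd files: it prints the snapshot format version, the region name, the embedded PDX type and enum registries, and finally every key/value pair. A plausible invocation (classpath and file name are illustrative) is java -cp <geode jars> org.apache.geode.internal.cache.snapshot.GFSnapshot mysnapshot.gfd.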
use of org.apache.geode.internal.cache.snapshot.SnapshotPacket.SnapshotRecord in project geode by apache.
the class GFSnapshot method read.
/**
* Reads a snapshot file.
*
* @param <K> the key type
* @param <V> the value type
* @param snapshot the snapshot file
* @return the snapshot iterator
*
* @throws IOException error reading the snapshot file
* @throws ClassNotFoundException unable to deserialize entry
*/
public static <K, V> SnapshotIterator<K, V> read(final File snapshot)
    throws IOException, ClassNotFoundException {
  return new SnapshotIterator<K, V>() {
    GFSnapshotImporter in = new GFSnapshotImporter(snapshot);

    private boolean foundNext;
    private Entry<K, V> next;

    @Override
    public boolean hasNext() throws IOException, ClassNotFoundException {
      if (!foundNext) {
        return moveNext();
      }
      return true;
    }

    @Override
    public Entry<K, V> next() throws IOException, ClassNotFoundException {
      if (!foundNext && !moveNext()) {
        throw new NoSuchElementException();
      }
      Entry<K, V> result = next;
      foundNext = false;
      next = null;
      return result;
    }

    @Override
    public void close() throws IOException {
      in.close();
    }

    private boolean moveNext() throws IOException, ClassNotFoundException {
      SnapshotRecord record;
      while ((record = in.readSnapshotRecord()) != null) {
        foundNext = true;
        final K key = record.getKeyObject();
        final V value = record.getValueObject();
        next = new Entry<K, V>() {
          @Override
          public K getKey() {
            return key;
          }

          @Override
          public V getValue() {
            return value;
          }

          @Override
          public V setValue(V value) {
            throw new UnsupportedOperationException();
          }
        };
        return true;
      }
      close();
      return false;
    }
  };
}
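Consuming the returned iterator is straightforward; note that hasNext() and next() declare IOException and ClassNotFoundException because each step may read and deserialize from disk, and the iterator must be closed explicitly. A minimal usage sketch, assuming SnapshotIterator is the public interface in org.apache.geode.cache.snapshot and using an illustrative file name:

import java.io.File;
import java.util.Map.Entry;
import org.apache.geode.cache.snapshot.SnapshotIterator;
import org.apache.geode.internal.cache.snapshot.GFSnapshot;

public class ReadSketch {
  public static void main(String[] args) throws Exception {
    SnapshotIterator<String, String> it =
        GFSnapshot.read(new File("snapshot-demo-example.gfd")); // illustrative name
    try {
      while (it.hasNext()) {
        Entry<String, String> entry = it.next();
        System.out.println(entry.getKey() + " = " + entry.getValue());
      }
    } finally {
      it.close(); // releases the underlying GFSnapshotImporter
    }
  }
}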
use of org.apache.geode.internal.cache.snapshot.SnapshotPacket.SnapshotRecord in project geode by apache.
the class LocalExporter method export.
@Override
public long export(Region<K, V> region, ExportSink sink, SnapshotOptions<K, V> options)
    throws IOException {
  LocalRegion local = RegionSnapshotServiceImpl.getLocalRegion(region);

  long count = 0;
  for (Entry<K, V> entry : region.entrySet()) {
    try {
      if (options.getFilter() == null || options.getFilter().accept(entry)) {
        sink.write(new SnapshotRecord(local, entry));
        count++;
      }
    } catch (EntryDestroyedException e) {
      // the entry was destroyed while iterating; skip it and continue to the next entry
    }
  }
  return count;
}
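The options.getFilter() check above is the hook for the public SnapshotFilter API, which lets callers restrict which entries are written to the sink. A sketch of a filter that skips null values, assuming the public org.apache.geode.cache.snapshot.SnapshotFilter interface (the class name and predicate are illustrative):

import java.util.Map.Entry;
import org.apache.geode.cache.snapshot.SnapshotFilter;

// Accepts only entries whose value is non-null; purely illustrative.
public class NonNullValueFilter<K, V> implements SnapshotFilter<K, V> {
  @Override
  public boolean accept(Entry<K, V> entry) {
    return entry.getValue() != null;
  }
}

A filter like this would be installed on the SnapshotOptions passed to export, e.g. via setFilter, before the export runs.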
use of org.apache.geode.internal.cache.snapshot.SnapshotPacket.SnapshotRecord in project geode by apache.
the class RegionSnapshotServiceImpl method importOnMember.
private void importOnMember(File snapshot, SnapshotFormat format, SnapshotOptions<K, V> options)
    throws IOException, ClassNotFoundException {
  final LocalRegion local = getLocalRegion(region);

  if (getLoggerI18n().infoEnabled())
    getLoggerI18n().info(LocalizedStrings.Snapshot_IMPORT_BEGIN_0, region.getName());

  long count = 0;
  long bytes = 0;
  long start = CachePerfStats.getStatTime();

  // Would be interesting to use a PriorityQueue ordered on isDone()
  // but this is probably close enough in practice.
  LinkedList<Future<?>> puts = new LinkedList<Future<?>>();
  GFSnapshotImporter in = new GFSnapshotImporter(snapshot);
  try {
    int bufferSize = 0;
    Map<K, V> buffer = new HashMap<K, V>();

    SnapshotRecord record;
    while ((record = in.readSnapshotRecord()) != null) {
      bytes += record.getSize();
      K key = record.getKeyObject();

      // Until we modify the semantics of put/putAll to allow null values we
      // have to subvert the API by using Token.INVALID. Alternatively we could
      // invoke create/invalidate directly but that prevents us from using
      // bulk operations. The ugly type coercion below is necessary to allow
      // strong typing elsewhere.
      V val = (V) Token.INVALID;
      if (record.hasValue()) {
        byte[] data = record.getValue();
        // If the underlying object is a byte[], we can't wrap it in a
        // CachedDeserializable. Somewhere along the line the header bytes
        // get lost and we start seeing serialization problems.
        if (data.length > 0 && data[0] == DSCODE.BYTE_ARRAY) {
          // It would be faster to use System.arraycopy() directly but since
          // the length field is variable it's probably safest and simplest to
          // keep the logic in the InternalDataSerializer.
          val = record.getValueObject();
        } else {
          val = (V) CachedDeserializableFactory.create(record.getValue());
        }
      }

      if (includeEntry(options, key, val)) {
        buffer.put(key, val);
        bufferSize += record.getSize();
        count++;

        // Push entries into the cache using putAll on a separate thread so we
        // can keep the disk busy. Throttle puts so we don't overwhelm the cache.
        if (bufferSize > BUFFER_SIZE) {
          if (puts.size() == IMPORT_CONCURRENCY) {
            puts.removeFirst().get();
          }

          final Map<K, V> copy = new HashMap<K, V>(buffer);
          Future<?> f = GemFireCacheImpl.getExisting("Importing region from snapshot")
              .getDistributionManager().getWaitingThreadPool().submit(new Runnable() {
                @Override
                public void run() {
                  local.basicImportPutAll(copy, !options.shouldInvokeCallbacks());
                }
              });

          puts.addLast(f);
          buffer.clear();
          bufferSize = 0;
        }
      }
    }

    // send off any remaining entries
    if (!buffer.isEmpty()) {
      local.basicImportPutAll(buffer, !options.shouldInvokeCallbacks());
    }

    // wait for completion and check for errors
    while (!puts.isEmpty()) {
      puts.removeFirst().get();
    }

    if (getLoggerI18n().infoEnabled()) {
      getLoggerI18n().info(LocalizedStrings.Snapshot_IMPORT_END_0_1_2_3,
          new Object[] {count, bytes, region.getName(), snapshot});
    }
  } catch (InterruptedException e) {
    while (!puts.isEmpty()) {
      puts.removeFirst().cancel(true);
    }
    Thread.currentThread().interrupt();
    throw (IOException) new InterruptedIOException().initCause(e);
  } catch (ExecutionException e) {
    while (!puts.isEmpty()) {
      puts.removeFirst().cancel(true);
    }
    throw new IOException(e);
  } finally {
    in.close();
    local.getCachePerfStats().endImport(count, start);
  }
}
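importOnMember is the internal path behind the public snapshot API; applications normally reach it through RegionSnapshotService rather than calling it directly. A round-trip sketch under that assumption (file name illustrative):

import java.io.File;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.snapshot.RegionSnapshotService;
import org.apache.geode.cache.snapshot.SnapshotOptions.SnapshotFormat;

public class ImportSketch {
  // Saves a region to a .gfd file, then loads it back through the public API.
  public static void roundTrip(Region<String, String> region) throws Exception {
    RegionSnapshotService<String, String> service = region.getSnapshotService();
    File file = new File("snapshot-demo-example.gfd"); // illustrative name
    service.save(file, SnapshotFormat.GEMFIRE);
    service.load(file, SnapshotFormat.GEMFIRE);
  }
}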