Use of org.apache.geode.internal.cache.persistence.BackupManager in project Geode by Apache:
the class MemberMBeanBridge, method backupMember.
/**
 * Backs up all disk stores on this member to the targeted directory.
 *
 * @param targetDirPath path of the directory where the backup is to be written
 * @return array of DiskBackupResult entries (one per persistent disk store) which may be
 *         aggregated at the managing node; {@code null} if this member has no cache.
 *         TODO: check the validity of this mbean call — when does it make sense to back up a
 *         single member of a gemfire system in isolation of the other members?
 * @throws ManagementException if the backup fails with an I/O error
 */
public DiskBackupResult[] backupMember(String targetDirPath) {
  // Single null check up front instead of the previous duplicated checks:
  // no cache on this member means there is nothing to back up.
  if (cache == null) {
    return null;
  }
  // Flush all disk stores (including region-owned ones) so the backup captures current data.
  for (DiskStore store : cache.listDiskStoresIncludingRegionOwned()) {
    store.flush();
  }
  File targetDir = new File(targetDirPath);
  try {
    BackupManager manager =
        cache.startBackup(cache.getInternalDistributedSystem().getDistributedMember());
    boolean abort = true;
    Set<PersistentID> existingDataStores;
    Set<PersistentID> successfulDataStores;
    try {
      existingDataStores = manager.prepareBackup();
      abort = false;
    } finally {
      // finishBackup always runs so the backup is terminated even when prepareBackup
      // throws; abort==true signals that the prepare phase did not complete.
      // Second argument (baseline directory) is not wired up yet.
      successfulDataStores = manager.finishBackup(targetDir, null, /* TODO rishi */ abort);
    }
    DiskBackupResult[] diskBackUpResult = new DiskBackupResult[existingDataStores.size()];
    int j = 0;
    for (PersistentID id : existingDataStores) {
      // The error flag is true when this store's backup did not complete successfully.
      diskBackUpResult[j++] =
          new DiskBackupResult(id.getDirectory(), !successfulDataStores.contains(id));
    }
    return diskBackUpResult;
  } catch (IOException e) {
    throw new ManagementException(e);
  }
}
Use of org.apache.geode.internal.cache.persistence.BackupManager in project Geode by Apache:
the class DiskStoreFactoryImpl, method create.
/**
 * Creates (or returns the already-existing) disk store with the given name.
 * Creation is serialized per cache (synchronized on the cache) as a simple fix for 41290,
 * so only one DiskStore can be created at a time.
 *
 * @param name the name of the disk store to create
 * @return the created or previously existing disk store
 */
public DiskStore create(String name) {
  this.attrs.name = name;
  DiskStore diskStore;
  synchronized (this.cache) {
    diskStore = findExisting(name);
    if (diskStore == null) {
      if (this.cache instanceof GemFireCacheImpl) {
        TypeRegistry pdxRegistry = this.cache.getPdxRegistry();
        DiskStoreImpl impl = new DiskStoreImpl(this.cache, this.attrs);
        diskStore = impl;
        // Added for M&M: notify the management layer of the new disk store.
        this.cache.getInternalDistributedSystem()
            .handleResourceEvent(ResourceEvent.DISKSTORE_CREATE, impl);
        impl.doInitialRecovery();
        this.cache.addDiskStore(impl);
        if (pdxRegistry != null) {
          pdxRegistry.creatingDiskStore(impl);
        }
      } else if (this.cache instanceof CacheCreation) {
        CacheCreation cacheCreation = (CacheCreation) this.cache;
        diskStore = new DiskStoreAttributesCreation(this.attrs);
        cacheCreation.addDiskStore(diskStore);
      }
    }
  }
  // that isn't backed up.
  if (this.cache instanceof GemFireCacheImpl) {
    BackupManager runningBackup = this.cache.getBackupManager();
    if (runningBackup != null) {
      runningBackup.waitForBackup();
    }
  }
  return diskStore;
}
Use of org.apache.geode.internal.cache.persistence.BackupManager in project Geode by Apache:
the class BackupJUnitTest, method backupAndRecover.
// Drives a full backup/restore cycle for a region created by the given factory:
// seeds data (rolling several oplogs), takes a backup, then verifies that
// (1) a restore refuses to overwrite live disk files, and
// (2) after destroying the disk dirs, the restore brings back exactly the
//     backed-up state (keys written after the backup must be gone).
// NOTE: statement order here is deliberate — the forceRoll/put/destroy sequence
// reproduces specific oplog layouts (see bug 43951 below); do not reorder.
public void backupAndRecover(RegionCreator regionFactory) throws IOException, InterruptedException {
  Region region = regionFactory.createRegion();
  // Put enough data to roll some oplogs
  for (int i = 0; i < 1024; i++) {
    region.put(i, getBytes(i));
  }
  // Destroy the first half so the surviving data spans multiple oplogs.
  for (int i = 0; i < 512; i++) {
    region.destroy(i);
  }
  for (int i = 1024; i < 2048; i++) {
    region.put(i, getBytes(i));
  }
  // This section of the test is for bug 43951
  findDiskStore().forceRoll();
  // add a put to the current crf
  region.put("junk", "value");
  // do a destroy of a key in a previous oplog
  region.destroy(2047);
  // do a destroy of the key in the current crf
  region.destroy("junk");
  // the current crf is now all garbage but
  // we need to keep the drf around since the older
  // oplog has a create that it deletes.
  findDiskStore().forceRoll();
  // restore the deleted entry.
  region.put(2047, getBytes(2047));
  // Flush every disk store so all of the above is on disk before closing.
  for (DiskStore store : cache.listDiskStoresIncludingRegionOwned()) {
    store.flush();
  }
  cache.close();
  // Recover from disk and confirm the expected keys (512..2047) are present
  // and the destroyed ones (0..511) are gone.
  createCache();
  region = regionFactory.createRegion();
  validateEntriesExist(region, 512, 2048);
  for (int i = 0; i < 512; i++) {
    assertNull(region.get(i));
  }
  // Take the backup of the recovered state.
  BackupManager backup = cache.startBackup(cache.getInternalDistributedSystem().getDistributedMember());
  backup.prepareBackup();
  backup.finishBackup(backupDir, null, false);
  // Put another key to make sure we restore
  // from a backup that doesn't contain this key
  region.put("A", "A");
  cache.close();
  // Make sure the restore script refuses to run before we destroy the files.
  restoreBackup(true);
  // Make sure the disk store is unaffected by the failed restore
  createCache();
  region = regionFactory.createRegion();
  validateEntriesExist(region, 512, 2048);
  for (int i = 0; i < 512; i++) {
    assertNull(region.get(i));
  }
  // "A" was written after the backup, so it survives the *failed* restore...
  assertEquals("A", region.get("A"));
  region.put("B", "B");
  cache.close();
  // destroy the disk directories
  destroyDiskDirs();
  // Now the restore script should work
  restoreBackup(false);
  // Make sure the cache has the restored backup
  createCache();
  region = regionFactory.createRegion();
  validateEntriesExist(region, 512, 2048);
  for (int i = 0; i < 512; i++) {
    assertNull(region.get(i));
  }
  // ...but after a *successful* restore, post-backup keys must be gone.
  assertNull(region.get("A"));
  assertNull(region.get("B"));
}
Use of org.apache.geode.internal.cache.persistence.BackupManager in project Geode by Apache:
the class BackupJUnitTest, method testCompactionDuringBackup.
// Verifies that a backup taken while a force-compaction runs concurrently still
// restores the pre-compaction data: destroys + compaction happen *between*
// startBackup and finishBackup, so the backup must capture the oplogs as they
// were at startBackup time. The interleaving is the point of the test — do not reorder.
@Test
public void testCompactionDuringBackup() throws IOException, InterruptedException {
  // Tiny max oplog size + manual compaction so we control exactly when rolling
  // and compaction happen.
  DiskStoreFactory dsf = cache.createDiskStoreFactory();
  dsf.setDiskDirs(diskDirs);
  dsf.setMaxOplogSize(1);
  dsf.setAutoCompact(false);
  dsf.setAllowForceCompaction(true);
  dsf.setCompactionThreshold(20);
  String name = "diskStore";
  DiskStoreImpl ds = (DiskStoreImpl) dsf.create(name);
  Region region = createRegion();
  // Put enough data to roll some oplogs
  for (int i = 0; i < 1024; i++) {
    region.put(i, getBytes(i));
  }
  // Begin the backup, then mutate and compact before finishing it.
  RestoreScript script = new RestoreScript();
  ds.startBackup(backupDir, null, script);
  for (int i = 2; i < 1024; i++) {
    assertTrue(region.destroy(i) != null);
  }
  // Compaction succeeds while the backup is in progress.
  assertTrue(ds.forceCompaction());
  // Put another key to make sure we restore
  // from a backup that doesn't contain this key
  region.put("A", "A");
  ds.finishBackup(new BackupManager(cache.getInternalDistributedSystem().getDistributedMember(), cache));
  script.generate(backupDir);
  cache.close();
  destroyDiskDirs();
  restoreBackup(false);
  // The restored store must contain all 1024 original entries (destroys and the
  // post-startBackup put "A" happened after the backup point).
  createCache();
  ds = createDiskStore();
  region = createRegion();
  validateEntriesExist(region, 0, 1024);
  assertNull(region.get("A"));
}
Use of org.apache.geode.internal.cache.persistence.BackupManager in project Geode by Apache:
the class GemFireCacheImpl, method startBackup.
/**
 * Starts a backup of this cache on behalf of the given sender, installing the new
 * BackupManager as the cache's current one. Only one backup may run at a time.
 *
 * @param sender the member that requested the backup
 * @return the started BackupManager
 * @throws IOException if a backup is already in progress on this cache
 */
public BackupManager startBackup(InternalDistributedMember sender) throws IOException {
  BackupManager newManager = new BackupManager(sender, this);
  // Atomically claim the "current backup" slot; a non-null value means another
  // backup is still running.
  boolean installed = this.backupManager.compareAndSet(null, newManager);
  if (!installed) {
    throw new IOException("Backup already in progress");
  }
  newManager.start();
  return newManager;
}
Aggregations