use of org.apache.geode.cache.DiskStore in project geode by apache.
the class DescribeDiskStoreFunction method execute.
public void execute(final FunctionContext context) {
  Cache cache = getCache();
  try {
    if (cache instanceof InternalCache) {
      InternalCache gemfireCache = (InternalCache) cache;
      DistributedMember member = gemfireCache.getMyId();
      String diskStoreName = (String) context.getArguments();
      String memberId = member.getId();
      String memberName = member.getName();
      DiskStore diskStore = gemfireCache.findDiskStore(diskStoreName);
      if (diskStore != null) {
        DiskStoreDetails diskStoreDetails = new DiskStoreDetails(diskStore.getDiskStoreUUID(),
            diskStore.getName(), memberId, memberName);
        diskStoreDetails.setAllowForceCompaction(diskStore.getAllowForceCompaction());
        diskStoreDetails.setAutoCompact(diskStore.getAutoCompact());
        diskStoreDetails.setCompactionThreshold(diskStore.getCompactionThreshold());
        diskStoreDetails.setMaxOplogSize(diskStore.getMaxOplogSize());
        diskStoreDetails.setQueueSize(diskStore.getQueueSize());
        diskStoreDetails.setTimeInterval(diskStore.getTimeInterval());
        diskStoreDetails.setWriteBufferSize(diskStore.getWriteBufferSize());
        diskStoreDetails.setDiskUsageWarningPercentage(diskStore.getDiskUsageWarningPercentage());
        diskStoreDetails.setDiskUsageCriticalPercentage(diskStore.getDiskUsageCriticalPercentage());
        setDiskDirDetails(diskStore, diskStoreDetails);
        setRegionDetails(gemfireCache, diskStore, diskStoreDetails);
        setCacheServerDetails(gemfireCache, diskStore, diskStoreDetails);
        setGatewayDetails(gemfireCache, diskStore, diskStoreDetails);
        setPdxSerializationDetails(gemfireCache, diskStore, diskStoreDetails);
        setAsyncEventQueueDetails(gemfireCache, diskStore, diskStoreDetails);
        context.getResultSender().lastResult(diskStoreDetails);
      } else {
        context.getResultSender().sendException(new DiskStoreNotFoundException(String.format(
            "A disk store with name (%1$s) was not found on member (%2$s).", diskStoreName,
            memberName)));
      }
    }
  } catch (Exception e) {
    logger.error("Error occurred while executing 'describe disk-store': {}!", e.getMessage(), e);
    context.getResultSender().sendException(e);
  }
}
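For context, a function like this is normally dispatched through the FunctionService onto the member that owns the disk store. The sketch below only illustrates the calling side and is an assumption, not part of the snippet above: the no-arg constructor, the targetMember variable, and the cast of the collected result are placeholders, and setArguments is named withArgs on older releases.

import java.util.List;
import org.apache.geode.cache.execute.Execution;
import org.apache.geode.cache.execute.FunctionService;
import org.apache.geode.cache.execute.ResultCollector;
import org.apache.geode.distributed.DistributedMember;

// Hypothetical caller-side sketch; "myDiskStore" is a placeholder name.
void describeDiskStoreOn(DistributedMember targetMember) {
  Execution execution = FunctionService.onMember(targetMember)
      .setArguments("myDiskStore"); // becomes context.getArguments() in execute() above
  ResultCollector<?, ?> collector = execution.execute(new DescribeDiskStoreFunction());
  List<?> results = (List<?>) collector.getResult(); // DiskStoreDetails or a sent exception
}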
use of org.apache.geode.cache.DiskStore in project geode by apache.
the class DestroyDiskStoreFunction method execute.
@Override
public void execute(FunctionContext context) {
  // Declared here so that it's available when returning a Throwable
  String memberId = "";
  try {
    final Object[] args = (Object[]) context.getArguments();
    final String diskStoreName = (String) args[0];
    InternalCache cache = getCache();
    DistributedMember member = cache.getDistributedSystem().getDistributedMember();
    memberId = member.getId();
    // If they set a name use it instead
    if (!member.getName().equals("")) {
      memberId = member.getName();
    }
    DiskStore diskStore = cache.findDiskStore(diskStoreName);
    CliFunctionResult result;
    if (diskStore != null) {
      XmlEntity xmlEntity = new XmlEntity(CacheXml.DISK_STORE, "name", diskStoreName);
      diskStore.destroy();
      result = new CliFunctionResult(memberId, xmlEntity, "Success");
    } else {
      result = new CliFunctionResult(memberId, false, "Disk store not found on this member");
    }
    context.getResultSender().lastResult(result);
  } catch (IllegalStateException isex) {
    CliFunctionResult result = new CliFunctionResult(memberId, false, isex.getMessage());
    context.getResultSender().lastResult(result);
  } catch (CacheClosedException cce) {
    CliFunctionResult result = new CliFunctionResult(memberId, false, null);
    context.getResultSender().lastResult(result);
  } catch (VirtualMachineError e) {
    SystemFailure.initiateFailure(e);
    throw e;
  } catch (Throwable th) {
    SystemFailure.checkFailure();
    logger.error("Could not destroy disk store: {}", th.getMessage(), th);
    CliFunctionResult result = new CliFunctionResult(memberId, th, null);
    context.getResultSender().lastResult(result);
  }
}
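The IllegalStateException branch above exists because DiskStore.destroy() refuses to remove a store that is still referenced. A minimal sketch of that behavior, with "scratchStore" as a placeholder name not taken from the function:

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.DiskStore;

// Sketch of the destroy semantics the catch block above guards against.
void destroyIfUnused(Cache cache) {
  DiskStore ds = cache.findDiskStore("scratchStore");
  if (ds != null) {
    try {
      ds.destroy(); // succeeds only while no region or queue still uses the store
    } catch (IllegalStateException e) {
      // the store is still referenced, e.g. by a region created with setDiskStoreName("scratchStore")
    }
  }
}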
use of org.apache.geode.cache.DiskStore in project geode by apache.
the class PeerTypeRegistration method hasPersistentRegions.
public boolean hasPersistentRegions() {
  Collection<DiskStore> diskStores = cache.listDiskStoresIncludingRegionOwned();
  boolean hasPersistentRegions = false;
  for (DiskStore store : diskStores) {
    hasPersistentRegions |= ((DiskStoreImpl) store).hasPersistedData();
  }
  return hasPersistentRegions;
}
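The same check can be written with a short-circuiting stream; this is only a stylistic sketch over the same calls used above, not the project's code.

// Equivalent short-circuiting form of the loop above.
public boolean hasPersistentRegions() {
  return cache.listDiskStoresIncludingRegionOwned().stream()
      .anyMatch(store -> ((DiskStoreImpl) store).hasPersistedData());
}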
use of org.apache.geode.cache.DiskStore in project geode by apache.
the class DiskRegRecoveryJUnitTest method testBug41340.
@Test
public void testBug41340() throws Exception {
  diskProps.setDiskDirs(dirs);
  diskProps.setPersistBackup(true);
  diskProps.setSynchronous(true);
  diskProps.setRolling(true);
  diskProps.setRegionName("testBug41340");
  region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
  assertEquals(0, region.size());
  // put some entries
  region.put("0", "0");
  region.put("1", "1");
  region.put("2", "2");
  region.put("3", "3");
  // Create another oplog
  DiskStore store = cache.findDiskStore(region.getAttributes().getDiskStoreName());
  store.forceRoll();
  // Now create and destroy all of the entries in the new oplog. This should cause us to
  // remove the CRF but leave the DRF, which has creates in reverse order. Now we have
  // garbage destroys which have higher IDs than any create.
  region.put("4", "1");
  region.put("5", "2");
  region.put("6", "3");
  region.destroy("0");
  region.destroy("6");
  region.destroy("5");
  region.destroy("4");
  store.forceRoll();
  // Force a recovery
  GemFireCacheImpl.getInstance().close();
  cache = createCache();
  region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
  assertEquals(3, region.size());
  // With bug 41340, this is reusing an oplog id.
  region.put("7", "7");
  // region.close();
  // Force another recovery
  GemFireCacheImpl.getInstance().close();
  cache = createCache();
  region = DiskRegionHelperFactory.getSyncPersistOnlyRegion(cache, diskProps, Scope.LOCAL);
  // Make sure we didn't lose the entry
  assertEquals(4, region.size());
  assertEquals("7", region.get("7"));
  region.close();
}
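The test drives oplog rolling through DiskStore.forceRoll() on a store created by helper code. For reference, a stand-alone store can be configured for rolling and manual compaction directly through DiskStoreFactory; the directory and store names below are placeholders and the sketch is not part of the test.

import java.io.File;
import org.apache.geode.cache.Cache;
import org.apache.geode.cache.DiskStore;
import org.apache.geode.cache.DiskStoreFactory;

// Hypothetical configuration sketch; "diskDir1" and "myStore" are placeholder names.
void rollAndCompact(Cache cache) {
  DiskStoreFactory dsf = cache.createDiskStoreFactory();
  dsf.setDiskDirs(new File[] {new File("diskDir1")});
  dsf.setAutoCompact(true);            // let closed oplogs be compacted automatically
  dsf.setAllowForceCompaction(true);   // permit manual forceCompaction() calls
  DiskStore store = dsf.create("myStore");
  store.forceRoll();                   // close the current oplog and start a new one
  store.forceCompaction();             // request compaction of eligible closed oplogs
}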
use of org.apache.geode.cache.DiskStore in project geode by apache.
the class DiskOfflineCompactionJUnitTest method testForceRollTwoEntriesWithUpdateAndDestroy.
@Test
public void testForceRollTwoEntriesWithUpdateAndDestroy() throws Exception {
  DiskStoreFactory dsf = cache.createDiskStoreFactory();
  dsf.setAutoCompact(false);
  String name = "testForceRollTwoEntriesWithUpdateAndDestroy";
  DiskStore diskStore = dsf.create(name);
  File crfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.crf");
  File drfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.drf");
  File krfFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_1.krf");
  File crf2File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.crf");
  File drf2File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.drf");
  File krf2File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_2.krf");
  File ifFile = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + ".if");
  AttributesFactory af = new AttributesFactory();
  af.setDiskStoreName(name);
  af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  Region r = cache.createRegion("r", af.create());
  int extra_byte_num_per_entry =
      InternalDataSerializer.calculateBytesForTSandDSID(getDSID((LocalRegion) r));
  // extra key to keep oplog1 from being empty
  r.put("key0", "value0");
  r.put("key1", "value1");
  r.put("key2", "value2");
  diskStore.forceRoll();
  r.put("key1", "update1");
  r.put("key2", "update2");
  r.remove("key2");
  cache.close();
  ds.disconnect();
  DiskStoreImpl.validate(name, diskStore.getDiskDirs());
  int headerSize = Oplog.OPLOG_MAGIC_SEQ_REC_SIZE + Oplog.OPLOG_DISK_STORE_REC_SIZE;
  int crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, false)
      + Oplog.OPLOG_NEW_ENTRY_BASE_REC_SIZE;
  int createsize0 = getSize4Create(extra_byte_num_per_entry, "key0", "value0");
  int createsize1 = getSize4Create(extra_byte_num_per_entry, "key1", "value1");
  int createsize2 = getSize4Create(extra_byte_num_per_entry, "key2", "value2");
  int updatesize1 = getSize4UpdateWithKey(extra_byte_num_per_entry, "key1", "update1");
  int updatesize2 = getSize4UpdateWithKey(extra_byte_num_per_entry, "key2", "update2");
  int tombstonesize1 = getSize4TombstoneWithoutKey(extra_byte_num_per_entry);
  assertEquals(crfsize + createsize0 + createsize1 + createsize2, crfFile.length());
  assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE + getRVVSize(0, null, true),
      drfFile.length());
  crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE
      + getRVVSize(1, new int[] {1}, false);
  assertEquals(crfsize + updatesize1 + updatesize2 + tombstonesize1, crf2File.length());
  assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE
      + getRVVSize(1, new int[] {0}, true), drf2File.length());
  long originalIfLength = ifFile.length();
  DiskStoreImpl dsi = DiskStoreImpl.offlineCompact(name, diskStore.getDiskDirs(), false, -1);
  assertEquals(3, dsi.getDeadRecordCount());
  assertEquals(3, dsi.getLiveEntryCount());
  assertEquals(false, crfFile.exists());
  assertEquals(false, drfFile.exists());
  assertEquals(false, krfFile.exists());
  assertEquals(false, crf2File.exists());
  assertEquals(false, drf2File.exists());
  assertEquals(false, krf2File.exists());
  File crf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.crf");
  File drf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.drf");
  File krf3File = new File(diskStore.getDiskDirs()[0], "BACKUP" + name + "_3.krf");
  assertEquals(true, crf3File.exists());
  assertEquals(true, drf3File.exists());
  assertEquals(true, krf3File.exists());
  // after offline compaction, the rvv is reset and only 3 update-with-key records remain,
  // i.e. key0, key1, key2 (tombstone) in _3.crf
  crfsize = headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE
      + getRVVSize(1, new int[] {1}, false);
  int updatesize0 = getSize4UpdateWithKey(extra_byte_num_per_entry, "key0", "value0");
  tombstonesize1 = getSize4TombstoneWithKey(extra_byte_num_per_entry, "key2");
  assertEquals(crfsize + updatesize0 + updatesize1 + tombstonesize1, crf3File.length());
  assertEquals(headerSize + Oplog.OPLOG_GEMFIRE_VERSION_REC_SIZE
      + getRVVSize(1, new int[] {0}, true), drf3File.length());
  // Now we preallocate space for if files and also crfs and drfs, so the check below is
  // no longer valid.
  // if (originalIfLength <= ifFile.length()) {
  //   fail("expected " + ifFile.length() + " to be < " + originalIfLength);
  // }
  connectDSandCache();
  dsf = cache.createDiskStoreFactory();
  diskStore = dsf.create(name);
  af = new AttributesFactory();
  af.setDiskStoreName(name);
  af.setDataPolicy(DataPolicy.PERSISTENT_REPLICATE);
  r = cache.createRegion("r", af.create());
  assertEquals(2, r.size());
  assertEquals("value0", r.get("key0"));
  assertEquals("update1", r.get("key1"));
  // if the test passed, clean up the files
  r.destroyRegion();
  diskStore.destroy();
}
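The assertions above rebuild each oplog file name by hand from the pattern "BACKUP" + store name + "_" + oplog id + extension. A small hypothetical helper makes that pattern explicit; it is derived from the test itself and is not part of the Geode API.

import java.io.File;
import org.apache.geode.cache.DiskStore;

// Hypothetical helper mirroring the naming pattern used in the assertions above.
static File oplogFile(DiskStore diskStore, int oplogId, String extension) {
  return new File(diskStore.getDiskDirs()[0],
      "BACKUP" + diskStore.getName() + "_" + oplogId + extension);
}

// usage: File crf3File = oplogFile(diskStore, 3, ".crf");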