Use of org.apache.geode.cache.CacheClosedException in project geode by apache:
class CreateDiskStoreFunction, method execute.
@Override
public void execute(FunctionContext context) {
  // Declared outside the try block so the member id is available when a
  // Throwable has to be sent back to the caller.
  String memberId = "";
  try {
    // Arguments arrive as [diskStoreName, diskStoreAttributes].
    final Object[] args = (Object[]) context.getArguments();
    final String diskStoreName = (String) args[0];
    // Fixed: the original index was written as "01", an octal literal.
    // It happens to equal 1, but octal literals are misleading and are
    // flagged by static analysis; use plain decimal.
    final DiskStoreAttributes diskStoreAttrs = (DiskStoreAttributes) args[1];
    InternalCache cache = getCache();
    DistributedMember member = cache.getDistributedSystem().getDistributedMember();
    memberId = member.getId();
    // Prefer the configured member name over the generated id when one is set.
    if (!member.getName().equals("")) {
      memberId = member.getName();
    }
    DiskStoreFactory diskStoreFactory = cache.createDiskStoreFactory(diskStoreAttrs);
    diskStoreFactory.create(diskStoreName);
    // Record the created disk store in the cluster configuration XML.
    XmlEntity xmlEntity = new XmlEntity(CacheXml.DISK_STORE, "name", diskStoreName);
    context.getResultSender().lastResult(new CliFunctionResult(memberId, xmlEntity, "Success"));
  } catch (CacheClosedException cce) {
    // The cache went away under us; report a plain failure for this member.
    context.getResultSender().lastResult(new CliFunctionResult(memberId, false, null));
  } catch (VirtualMachineError e) {
    // Fatal JVM error: initiate the SystemFailure protocol and rethrow.
    SystemFailure.initiateFailure(e);
    throw e;
  } catch (Throwable th) {
    // Verify no fatal error is pending before attempting normal error handling.
    SystemFailure.checkFailure();
    logger.error("Could not create disk store: {}", th.getMessage(), th);
    context.getResultSender().lastResult(new CliFunctionResult(memberId, th, null));
  }
}
Use of org.apache.geode.cache.CacheClosedException in project geode by apache:
class AlterRuntimeConfigFunction, method execute.
@Override
public void execute(FunctionContext context) {
  // Kept outside the try block so the member id survives into the catch clauses.
  String memberId = "";
  try {
    final InternalCache cache = getCache();
    final DistributionConfig config = cache.getInternalDistributedSystem().getConfig();
    memberId = cache.getDistributedSystem().getDistributedMember().getId();
    // The caller hands us a map of runtime attribute name -> new value.
    @SuppressWarnings("unchecked")
    final Map<String, String> runtimeAttributes = (Map<String, String>) context.getArguments();
    for (Entry<String, String> attribute : runtimeAttributes.entrySet()) {
      final String name = attribute.getKey();
      final String value = attribute.getValue();
      // Cache-level settings are applied directly; anything else is delegated
      // to the distribution config as a runtime-sourced attribute.
      if (name.equals(CliStrings.ALTER_RUNTIME_CONFIG__COPY__ON__READ)) {
        cache.setCopyOnRead(Boolean.parseBoolean(value));
      } else if (name.equals(CliStrings.ALTER_RUNTIME_CONFIG__LOCK__LEASE)) {
        cache.setLockLease(Integer.parseInt(value));
      } else if (name.equals(CliStrings.ALTER_RUNTIME_CONFIG__LOCK__TIMEOUT)) {
        cache.setLockTimeout(Integer.parseInt(value));
      } else if (name.equals(CliStrings.ALTER_RUNTIME_CONFIG__SEARCH__TIMEOUT)) {
        cache.setSearchTimeout(Integer.parseInt(value));
      } else if (name.equals(CliStrings.ALTER_RUNTIME_CONFIG__MESSAGE__SYNC__INTERVAL)) {
        cache.setMessageSyncInterval(Integer.parseInt(value));
      } else {
        config.setAttribute(name, value, ConfigSource.runtime());
      }
    }
    context.getResultSender().lastResult(new CliFunctionResult(memberId, true, null));
  } catch (CacheClosedException cce) {
    // Cache closed mid-operation; report a plain failure for this member.
    context.getResultSender().lastResult(new CliFunctionResult(memberId, false, null));
  } catch (Exception e) {
    logger.error("Exception happened on : " + memberId, e);
    context.getResultSender()
        .lastResult(new CliFunctionResult(memberId, e, CliUtil.stackTraceAsString(e)));
  }
}
Use of org.apache.geode.cache.CacheClosedException in project geode by apache:
class MyDistributedSystemListener, method addedDistributedSystem.
/**
* Please note that dynamic addition of the sender id to region is not yet available.
*/
public void addedDistributedSystem(int remoteDsId) {
cache = CacheFactory.getAnyInstance();
// Distributed system id 2 plays the sender role; any other id plays the receiver role.
if (remoteDsId == 2) {
if (cache != null) {
// Create a manually-started, non-persistent gateway sender named "LN_<remoteDsId>".
GatewaySender serialSender = cache.createGatewaySenderFactory().setManualStart(true).setPersistenceEnabled(false).setDiskStoreName("LN_" + remoteDsId).create("LN_" + remoteDsId, remoteDsId);
System.out.println("Sender Created : " + serialSender.getId());
// NOTE: the sender id is not attached to the region here; per the class
// comment, dynamic addition of a sender id to a region is not yet available.
Region region = cache.createRegionFactory().create("MyRegion");
System.out.println("Created Region : " + region.getName());
try {
serialSender.start();
System.out.println("Sender Started: " + serialSender.getId());
} catch (Exception e) {
e.printStackTrace();
}
} else {
throw new CacheClosedException("Cache is not initialized here");
}
} else {
// Receiver side: create the region first, then a manually-started
// gateway receiver on a fixed port.
// NOTE(review): unlike the sender branch above, a null cache is silently
// ignored here rather than raising CacheClosedException — confirm whether
// this asymmetry is intentional.
if (cache != null) {
Region region = cache.createRegionFactory().create("MyRegion");
System.out.println("Created Region :" + region.getName());
// Fixed start port 12345 — presumably fine for this example/test setup.
GatewayReceiver receiver = cache.createGatewayReceiverFactory().setStartPort(12345).setManualStart(true).create();
System.out.println("Created GatewayReceiver : " + receiver);
try {
receiver.start();
System.out.println("GatewayReceiver Started.");
} catch (IOException e) {
e.printStackTrace();
}
}
}
}
Use of org.apache.geode.cache.CacheClosedException in project geode by apache:
class DistributedRegionFunction, method execute.
@Override
public void execute(FunctionContext context) {
  // Test function: optionally simulates a cache-closed failover, then performs
  // region puts and streams a batch of results back to the caller.
  RegionFunctionContext rcontext = (RegionFunctionContext) context;
  Region<Object, Object> region = rcontext.getDataSet();
  InternalDistributedSystem sys = InternalDistributedSystem.getConnectedInstance();
  sys.getLogWriter().fine("DistributedRegionFunction#execute( " + rcontext + " )");
  // Sanity checks on the execution target: must be a storing, non-NORMAL
  // region, invoked with exactly 20 filter keys.
  Assert.assertTrue(region.getAttributes().getDataPolicy().withStorage());
  Assert.assertTrue(region.getAttributes().getDataPolicy() != DataPolicy.NORMAL);
  Assert.assertTrue(rcontext.getFilter().size() == 20);
  long startTime = System.currentTimeMillis();
  if (Boolean.TRUE.equals(rcontext.getArguments())) {
    // First execution only: disconnect and throw to simulate failover.
    // On a retry (possible duplicate) the cache is left alone.
    if (!rcontext.isPossibleDuplicate()) {
      sys.disconnect();
      throw new CacheClosedException("Throwing CacheClosedException " + "to simulate failover during function exception");
    }
  } else {
    // No failover requested: burn time with a criterion that never completes
    // (done() always returns false), waiting up to 12 s without throwing.
    WaitCriterion wc = new WaitCriterion() {
      String excuse;
      public boolean done() {
        return false;
      }
      public String description() {
        return excuse;
      }
    };
    Wait.waitForCriterion(wc, 12000, 500, false);
  }
  long endTime = System.currentTimeMillis();
  // Intentionally perform a region operation to provoke a
  // CacheClosedException if the cache was closed above.
  // Fixed: use autoboxing instead of the deprecated Integer(int) constructor.
  region.put("execKey-201", 201);
  if (rcontext.isPossibleDuplicate()) {
    // Extra puts mark that the function was re-executed after failover.
    region.put("execKey-202", 202);
    region.put("execKey-203", 203);
  }
  sys.getLogWriter().fine("Time wait for Function Execution = " + (endTime - startTime));
  // Stream a large batch of partial results, then the terminal result.
  for (int i = 0; i < 5000; i++) {
    context.getResultSender().sendResult(Boolean.TRUE);
  }
  context.getResultSender().lastResult(Boolean.TRUE);
}
Use of org.apache.geode.cache.CacheClosedException in project geode by apache:
class PersistentRVVRecoveryDUnitTest, method testWriteCorrectVersionToKrf.
/**
* Test that when we generate a krf, we write the version tag that matches the entry in the crf.
*/
@Test
public void testWriteCorrectVersionToKrf() throws Throwable {
  Host host = Host.getHost(0);
  final VM vm0 = host.getVM(1);
  final LocalRegion region = (LocalRegion) createAsyncRegionWithSmallQueue(vm0);
  // The idea here is to do a bunch of puts with async persistence.
  // At some point the oplog will switch. At that time, we wait for a krf
  // to be created and then throw an exception to shutdown the disk store.
  //
  // This should cause us to create a krf with some entries that have been
  // modified since the crf was written and are still in the async queue.
  //
  // To avoid deadlocks, we need to mark that the oplog was switched,
  // and then do the wait in the flusher thread.
  // Setup the callbacks to wait for krf creation and throw an exception.
  IgnoredException ex = IgnoredException.addIgnoredException("DiskAccessException");
  LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = true;
  try {
    final CountDownLatch krfCreated = new CountDownLatch(1);
    final AtomicBoolean oplogSwitched = new AtomicBoolean(false);
    CacheObserverHolder.setInstance(new CacheObserverAdapter() {
      @Override
      public void afterKrfCreated() {
        krfCreated.countDown();
      }
      @Override
      public void afterWritingBytes() {
        if (oplogSwitched.get()) {
          try {
            // Fixed: the original waited await(3000, TimeUnit.SECONDS) —
            // 50 minutes — while the failure message says "30 seconds".
            // 30 seconds is the intended bound.
            if (!krfCreated.await(30, TimeUnit.SECONDS)) {
              fail("KRF was not created in 30 seconds!");
            }
          } catch (InterruptedException e) {
            fail("interrupted");
          }
          // Force a failure to shut down the disk store.
          throw new DiskAccessException();
        }
      }
      @Override
      public void afterSwitchingOplog() {
        oplogSwitched.set(true);
      }
    });
    // This is just to make sure the first oplog is not completely garbage.
    region.put("testkey", "key");
    // Do some puts to trigger an oplog roll.
    try {
      // Starting with a value of 1 means the value should match
      // the region version for easier debugging.
      int i = 1;
      while (krfCreated.getCount() > 0) {
        i++;
        region.put("key" + (i % 3), i);
        Thread.sleep(2);
      }
    } catch (CacheClosedException | DiskAccessException expected) {
      // Expected once the injected DiskAccessException shuts the store down.
    }
    // Wait for the region to be destroyed. The region won't be destroyed
    // until the async flusher thread ends up switching oplogs.
    Wait.waitForCriterion(new WaitCriterion() {
      @Override
      public boolean done() {
        return region.isDestroyed();
      }
      @Override
      public String description() {
        return "Region was not destroyed : " + region.isDestroyed();
      }
    }, 3000 * 1000, 100, true);
    closeCache();
  } finally {
    ex.remove();
    LocalRegion.ISSUE_CALLBACKS_TO_CACHE_OBSERVER = false;
    CacheObserverHolder.setInstance(null);
  }
  // Recover the region and read the version tags as recorded in the krf.
  LocalRegion recoveredRegion = (LocalRegion) createAsyncRegionWithSmallQueue(vm0);
  VersionTag[] tagsFromKrf = new VersionTag[3];
  for (int i = 0; i < 3; i++) {
    NonTXEntry entry = (NonTXEntry) recoveredRegion.getEntry("key" + i);
    tagsFromKrf[i] = entry.getRegionEntry().getVersionStamp().asVersionTag();
    LogWriterUtils.getLogWriter().info("krfTag[" + i + "]=" + tagsFromKrf[i] + ",value=" + entry.getValue());
  }
  closeCache();
  // Set a system property to skip recovering from the krf so we can
  // get the version tag from the crf.
  System.setProperty(DiskStoreImpl.RECOVER_VALUES_SYNC_PROPERTY_NAME, "true");
  try {
    // Recover again, this time reading the version tags from the crf.
    recoveredRegion = (LocalRegion) createAsyncRegionWithSmallQueue(vm0);
    VersionTag[] tagsFromCrf = new VersionTag[3];
    for (int i = 0; i < 3; i++) {
      NonTXEntry entry = (NonTXEntry) recoveredRegion.getEntry("key" + i);
      tagsFromCrf[i] = entry.getRegionEntry().getVersionStamp().asVersionTag();
      LogWriterUtils.getLogWriter().info("crfTag[" + i + "]=" + tagsFromCrf[i] + ",value=" + entry.getValue());
    }
    // Make sure the version tags from the krf and the crf match.
    for (int i = 0; i < 3; i++) {
      assertEquals(tagsFromCrf[i], tagsFromKrf[i]);
    }
  } finally {
    // NOTE(review): this restores the property to "false" rather than
    // clearing it — preserved as-is in case other tests rely on it.
    System.setProperty(DiskStoreImpl.RECOVER_VALUES_SYNC_PROPERTY_NAME, "false");
  }
}
Aggregations