use of org.apache.geode.cache.client.ServerOperationException in project geode by apache.
the class PutAllCSDUnitTest method testPartialKeyInLocalRegion.
/**
* Tests partial key putAll and removeAll to 2 servers with local region
*/
@Test
public void testPartialKeyInLocalRegion() throws CacheException, InterruptedException {
final String title = "testPartialKeyInLocalRegion:";
final Host host = Host.getHost(0);
final VM server1 = host.getVM(0);
final VM server2 = host.getVM(1);
final VM client1 = host.getVM(2);
final VM client2 = host.getVM(3);
final String regionName = getUniqueName();
final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
// PR=false here: the servers host a distributed (non-partitioned) region, to test local-invalidates
final int serverPort1 = createBridgeServer(server1, regionName, 0, false, 0, null);
final int serverPort2 = createBridgeServer(server2, regionName, 0, false, 0, null);
createClient(client1, regionName, serverHost, new int[] { serverPort1 }, -1, -1, false, true, true);
createClient(client2, regionName, serverHost, new int[] { serverPort1 }, -1, -1, false, true, true);
server1.invoke(addExceptionTag1(expectedExceptions));
server2.invoke(addExceptionTag1(expectedExceptions));
client1.invoke(addExceptionTag1(expectedExceptions));
client2.invoke(addExceptionTag1(expectedExceptions));
server1.invoke(new CacheSerializableRunnable(title + "server1 add cacheWriter") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(regionName);
// let the server trigger an exception after creating 15 keys
region.getAttributesMutator().setCacheWriter(new MyWriter(15));
}
});
client2.invoke(new CacheSerializableRunnable(title + "client2 add listener") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(regionName);
region.getAttributesMutator().addCacheListener(new MyListener(false));
region.registerInterest("ALL_KEYS");
LogWriterUtils.getLogWriter().info("client1 registerInterest ALL_KEYS at " + region.getFullPath());
}
});
client1.invoke(new CacheSerializableRunnable(title + "client1 putAll") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(regionName);
// create keys
try {
doPutAll(regionName, title, numberOfEntries);
fail("Expect ServerOperationException caused by PutAllParitialResultException");
} catch (ServerOperationException soe) {
assertTrue(soe.getMessage().contains(LocalizedStrings.Region_PutAll_Applied_PartialKeys_At_Server_0.toLocalizedString(region.getFullPath())));
assertTrue(soe.getCause() instanceof RuntimeException);
assertTrue(soe.getCause().getMessage().contains("Triggered exception as planned, created 15 keys"));
}
}
});
{
WaitCriterion waitForSizes = new WaitCriterion() {
@Override
public String description() {
return "waiting for conditions to be met";
}
@Override
public boolean done() {
int c1Size = getRegionSize(client1, regionName);
int c2Size = getRegionSize(client2, regionName);
int s1Size = getRegionSize(server1, regionName);
int s2Size = getRegionSize(server2, regionName);
LogWriterUtils.getLogWriter().info("region sizes: " + c1Size + "," + c2Size + "," + s1Size + "," + s2Size);
if (c1Size != 15) {
LogWriterUtils.getLogWriter().info("waiting for client1 to get all updates");
return false;
}
if (c2Size != 15) {
LogWriterUtils.getLogWriter().info("waiting for client2 to get all updates");
return false;
}
if (s1Size != 15) {
LogWriterUtils.getLogWriter().info("waiting for server1 to get all updates");
return false;
}
if (s2Size != 15) {
LogWriterUtils.getLogWriter().info("waiting for server2 to get all updates");
return false;
}
return true;
}
};
Wait.waitForCriterion(waitForSizes, 10000, 1000, true);
}
int server1Size = getRegionSize(server1, regionName);
int server2Size = getRegionSize(server1, regionName);
// reset cacheWriter's count to allow another 15 keys to be created
server1.invoke(new CacheSerializableRunnable(title + "server1 add cacheWriter") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(regionName);
// let the server trigger an exception after creating 15 keys
region.getAttributesMutator().setCacheWriter(new MyWriter(15));
}
});
// p2p putAll on DR and expect exception
server2.invoke(new CacheSerializableRunnable(title + "server2 add listener and putAll") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(regionName);
region.getAttributesMutator().addCacheListener(new MyListener(false));
// create keys
try {
doPutAll(regionName, title + "again:", numberOfEntries);
fail("Expect original RuntimeException caused by cacheWriter");
} catch (RuntimeException rte) {
assertTrue(rte.getMessage().contains("Triggered exception as planned, created 15 keys"));
}
}
});
server2Size = getRegionSize(server1, regionName);
assertEquals(server1Size + 15, server2Size);
{
WaitCriterion waitForSizes = new WaitCriterion() {
@Override
public String description() {
return "waiting for conditions to be met";
}
@Override
public boolean done() {
int c1Size = getRegionSize(client1, regionName);
int c2Size = getRegionSize(client2, regionName);
int s1Size = getRegionSize(server1, regionName);
int s2Size = getRegionSize(server2, regionName);
LogWriterUtils.getLogWriter().info("region sizes: " + c1Size + "," + c2Size + "," + s1Size + "," + s2Size);
if (c1Size != 15) {
// client 1 did not register interest
LogWriterUtils.getLogWriter().info("waiting for client1 to get all updates");
return false;
}
if (c2Size != 15 * 2) {
LogWriterUtils.getLogWriter().info("waiting for client2 to get all updates");
return false;
}
if (s1Size != 15 * 2) {
LogWriterUtils.getLogWriter().info("waiting for server1 to get all updates");
return false;
}
if (s2Size != 15 * 2) {
LogWriterUtils.getLogWriter().info("waiting for server2 to get all updates");
return false;
}
return true;
}
};
Wait.waitForCriterion(waitForSizes, 10000, 1000, true);
}
// now do a removeAll that is not allowed to remove everything
server1.invoke(new CacheSerializableRunnable(title + "server1 add cacheWriter") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(regionName);
// server triggers exception after destroying 5 keys
region.getAttributesMutator().setCacheWriter(new MyWriter(5));
}
});
client1.invoke(new CacheSerializableRunnable(title + "client1 removeAll") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(regionName);
// remove keys
try {
doRemoveAll(regionName, title, numberOfEntries);
fail("Expect ServerOperationException caused by PutAllParitialResultException");
} catch (ServerOperationException soe) {
assertTrue(soe.getMessage().contains(LocalizedStrings.Region_RemoveAll_Applied_PartialKeys_At_Server_0.toLocalizedString(region.getFullPath())));
assertTrue(soe.getCause() instanceof RuntimeException);
assertTrue(soe.getCause().getMessage().contains("Triggered exception as planned, destroyed 5 keys"));
}
}
});
{
WaitCriterion waitForSizes = new WaitCriterion() {
@Override
public String description() {
return "waiting for conditions to be met";
}
@Override
public boolean done() {
int c1Size = getRegionSize(client1, regionName);
int c2Size = getRegionSize(client2, regionName);
int s1Size = getRegionSize(server1, regionName);
int s2Size = getRegionSize(server2, regionName);
LogWriterUtils.getLogWriter().info("region sizes: " + c1Size + "," + c2Size + "," + s1Size + "," + s2Size);
if (c1Size != 15 - 5) {
// client 1 did not register interest
LogWriterUtils.getLogWriter().info("waiting for client1 to get all destroys");
return false;
}
if (c2Size != (15 * 2) - 5) {
LogWriterUtils.getLogWriter().info("waiting for client2 to get all destroys");
return false;
}
if (s1Size != (15 * 2) - 5) {
LogWriterUtils.getLogWriter().info("waiting for server1 to get all destroys");
return false;
}
if (s2Size != (15 * 2) - 5) {
LogWriterUtils.getLogWriter().info("waiting for server2 to get all destroys");
return false;
}
return true;
}
};
Wait.waitForCriterion(waitForSizes, 10000, 1000, true);
}
// reset cacheWriter's count to allow another 5 keys to be destroyed
server1.invoke(new CacheSerializableRunnable(title + "server1 add cacheWriter") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(regionName);
// server triggers exception after destroying 5 keys
region.getAttributesMutator().setCacheWriter(new MyWriter(5));
}
});
// p2p removeAll on DR and expect exception
server2.invoke(new CacheSerializableRunnable(title + "server2 add listener and removeAll") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(regionName);
region.getAttributesMutator().addCacheListener(new MyListener(false));
// remove keys
try {
doRemoveAll(regionName, title + "again:", numberOfEntries);
fail("Expect original RuntimeException caused by cacheWriter");
} catch (RuntimeException rte) {
assertTrue(rte.getMessage().contains("Triggered exception as planned, destroyed 5 keys"));
}
}
});
{
WaitCriterion waitForSizes = new WaitCriterion() {
@Override
public String description() {
return "waiting for conditions to be met";
}
@Override
public boolean done() {
int c1Size = getRegionSize(client1, regionName);
int c2Size = getRegionSize(client2, regionName);
int s1Size = getRegionSize(server1, regionName);
int s2Size = getRegionSize(server2, regionName);
LogWriterUtils.getLogWriter().info("region sizes: " + c1Size + "," + c2Size + "," + s1Size + "," + s2Size);
if (c1Size != 15 - 5) {
// client 1 did not register interest
LogWriterUtils.getLogWriter().info("waiting for client1 to get all destroys");
return false;
}
if (c2Size != (15 * 2) - 5 - 5) {
LogWriterUtils.getLogWriter().info("waiting for client2 to get all destroys");
return false;
}
if (s1Size != (15 * 2) - 5 - 5) {
LogWriterUtils.getLogWriter().info("waiting for server1 to get all destroys");
return false;
}
if (s2Size != (15 * 2) - 5 - 5) {
LogWriterUtils.getLogWriter().info("waiting for server2 to get all destroys");
return false;
}
return true;
}
};
Wait.waitForCriterion(waitForSizes, 10000, 1000, true);
}
server1.invoke(removeExceptionTag1(expectedExceptions));
server2.invoke(removeExceptionTag1(expectedExceptions));
client1.invoke(removeExceptionTag1(expectedExceptions));
client2.invoke(removeExceptionTag1(expectedExceptions));
// Stop server
stopBridgeServers(getCache());
}
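The MyWriter class that the test installs with setCacheWriter is not shown in this excerpt. A minimal sketch of a cache writer with the behavior the assertions rely on could look like the following; the class name, counter fields, and exact message text are assumptions, not the test's actual MyWriter:

import java.util.concurrent.atomic.AtomicInteger;
import org.apache.geode.cache.CacheWriterException;
import org.apache.geode.cache.EntryEvent;
import org.apache.geode.cache.util.CacheWriterAdapter;

// Hypothetical stand-in for the test's MyWriter: allow a fixed number of
// creates/destroys, then throw so a putAll/removeAll fails part way through.
class CountingCacheWriter extends CacheWriterAdapter {
  private final int allowedOperations;
  private final AtomicInteger creates = new AtomicInteger();
  private final AtomicInteger destroys = new AtomicInteger();

  CountingCacheWriter(int allowedOperations) {
    this.allowedOperations = allowedOperations;
  }

  @Override
  public void beforeCreate(EntryEvent event) throws CacheWriterException {
    if (creates.incrementAndGet() > allowedOperations) {
      // CacheWriterException is a RuntimeException, which matches the test's cause check
      throw new CacheWriterException(
          "Triggered exception as planned, created " + allowedOperations + " keys");
    }
  }

  @Override
  public void beforeDestroy(EntryEvent event) throws CacheWriterException {
    if (destroys.incrementAndGet() > allowedOperations) {
      throw new CacheWriterException(
          "Triggered exception as planned, destroyed " + allowedOperations + " keys");
    }
  }
}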
use of org.apache.geode.cache.client.ServerOperationException in project geode by apache.
the class PutAllCSDUnitTest method testPartialKeyInPR.
/**
* Tests partial key putAll to 2 PR servers; applying the data on the server side differs
* between PR and LR (PR does it in postPutAll). This test does not use single-hop putAll.
*/
@Test
public void testPartialKeyInPR() throws CacheException, InterruptedException {
final String title = "testPartialKeyInPR:";
final Host host = Host.getHost(0);
VM server1 = host.getVM(0);
final VM server2 = host.getVM(1);
VM client1 = host.getVM(2);
VM client2 = host.getVM(3);
final String regionName = getUniqueName();
final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
// set <true, false> means <PR=true, notifyBySubscription=false> to test local-invalidates
final int serverPort1 = createBridgeServer(server1, regionName, 0, true, 0, "ds1");
final int serverPort2 = createBridgeServer(server2, regionName, 0, true, 0, "ds1");
createClient(client1, regionName, serverHost, new int[] { serverPort1, serverPort2 }, -1, -1, false, false, true);
createClient(client2, regionName, serverHost, new int[] { serverPort1, serverPort2 }, -1, -1, false, false, true);
server1.invoke(addExceptionTag1(expectedExceptions));
server2.invoke(addExceptionTag1(expectedExceptions));
client1.invoke(addExceptionTag1(expectedExceptions));
client2.invoke(addExceptionTag1(expectedExceptions));
server1.invoke(new CacheSerializableRunnable(title + "server1 add slow listener") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(regionName);
region.getAttributesMutator().addCacheListener(new MyListener(true));
}
});
final SharedCounter sc_server2 = new SharedCounter("server2");
server2.invoke(new CacheSerializableRunnable(title + "server2 add slow listener") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(regionName);
region.getAttributesMutator().addCacheListener(new MyListener(server2, true, sc_server2, 10));
}
});
client2.invoke(new CacheSerializableRunnable(title + "client2 add listener") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(regionName);
region.getAttributesMutator().addCacheListener(new MyListener(false));
region.registerInterest("ALL_KEYS");
LogWriterUtils.getLogWriter().info("client2 registerInterest ALL_KEYS at " + region.getFullPath());
}
});
AsyncInvocation async1 = client1.invokeAsync(new CacheSerializableRunnable(title + "client1 add listener and putAll") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(regionName);
region.getAttributesMutator().addCacheListener(new MyListener(false));
region.registerInterest("ALL_KEYS");
// create keys
try {
doPutAll(regionName, title, numberOfEntries);
fail("Expect ServerOperationException caused by PutAllParitialResultException");
} catch (ServerOperationException soe) {
if (!(soe.getCause() instanceof PartitionOfflineException)) {
throw soe;
}
if (!soe.getMessage().contains(LocalizedStrings.Region_PutAll_Applied_PartialKeys_At_Server_0.toLocalizedString(region.getFullPath()))) {
throw soe;
}
}
}
});
// server2 will closeCache after creating 10 keys
ThreadUtils.join(async1, 30 * 1000);
if (async1.exceptionOccurred()) {
Assert.fail("Aync1 get exceptions:", async1.getException());
}
int client1Size = getRegionSize(client1, regionName);
// client2Size may be more than client1Size
int client2Size = getRegionSize(client2, regionName);
int server1Size = getRegionSize(server1, regionName);
LogWriterUtils.getLogWriter().info("region sizes: " + client1Size + "," + client2Size + "," + server1Size);
// restart server2
createBridgeServer(server2, regionName, serverPort2, true, 0, "ds1");
server1Size = getRegionSize(server1, regionName);
int server2Size = getRegionSize(server2, regionName);
LogWriterUtils.getLogWriter().info("region sizes after server2 restarted: " + client1Size + "," + client2Size + "," + server1Size + ":" + server2Size);
assertEquals(client2Size, server1Size);
assertEquals(client2Size, server2Size);
// close a server to re-run the test
closeCache(server2);
server1Size = getRegionSize(server1, regionName);
client1.invoke(new CacheSerializableRunnable(title + "client1 does putAll again") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(regionName);
// create keys
try {
doPutAll(regionName, title + "again:", numberOfEntries);
fail("Expect ServerOperationException caused by PutAllParitialResultException");
} catch (ServerOperationException soe) {
assertTrue(soe.getMessage().contains(LocalizedStrings.Region_PutAll_Applied_PartialKeys_At_Server_0.toLocalizedString(region.getFullPath())));
assertTrue(soe.getCause() instanceof PartitionOfflineException);
}
}
});
int new_server1Size = getRegionSize(server1, regionName);
int new_client1Size = getRegionSize(client1, regionName);
int new_client2Size = getRegionSize(client2, regionName);
LogWriterUtils.getLogWriter().info("region sizes after re-run the putAll: " + new_client1Size + "," + new_client2Size + "," + new_server1Size);
assertEquals(server1Size + numberOfEntries / 2, new_server1Size);
assertEquals(client1Size + numberOfEntries / 2, new_client1Size);
assertEquals(client2Size + numberOfEntries / 2, new_client2Size);
// restart server2
createBridgeServer(server2, regionName, serverPort2, true, 0, "ds1");
server1Size = getRegionSize(server1, regionName);
server2Size = getRegionSize(server2, regionName);
LogWriterUtils.getLogWriter().info("region sizes after restart server2: " + server1Size + "," + server2Size);
assertEquals(server1Size, server2Size);
// add a cacheWriter so the server fails the putAll after creating 15 keys
server1.invoke(new CacheSerializableRunnable(title + "server1 add cacheWriter") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(regionName);
// let the server trigger an exception after creating 15 keys
region.getAttributesMutator().setCacheWriter(new MyWriter(15));
}
});
// p2p putAll on PR and expect exception
server2.invoke(new CacheSerializableRunnable(title + "server2 add listener and putAll") {
@Override
public void run2() throws CacheException {
// create keys
try {
doPutAll(regionName, title + "once again:", numberOfEntries);
fail("Expected a CacheWriterException to be thrown by test");
} catch (CacheWriterException rte) {
assertTrue(rte.getMessage().contains("Triggered exception as planned, created 15 keys"));
}
}
});
new_server1Size = getRegionSize(server1, regionName);
int new_server2Size = getRegionSize(server2, regionName);
LogWriterUtils.getLogWriter().info("region sizes after restart server2: " + new_server1Size + "," + new_server2Size);
assertEquals(server1Size + 15, new_server1Size);
assertEquals(server2Size + 15, new_server2Size);
server1.invoke(removeExceptionTag1(expectedExceptions));
server2.invoke(removeExceptionTag1(expectedExceptions));
client1.invoke(removeExceptionTag1(expectedExceptions));
client2.invoke(removeExceptionTag1(expectedExceptions));
// Stop server
stopBridgeServers(getCache());
}
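Outside the test harness, an application that wants to survive a partial putAll typically catches ServerOperationException, inspects the cause, and re-puts only the entries that are missing on the servers. The following is a minimal sketch under those assumptions; the region type, the containsKeyOnServer check, and the retry policy are illustrative choices, not Geode's prescribed handling:

import java.util.HashMap;
import java.util.Map;
import org.apache.geode.cache.Region;
import org.apache.geode.cache.client.ServerOperationException;
import org.apache.geode.cache.persistence.PartitionOfflineException;

class PartialPutAllRetry {
  static void putAllWithRetry(Region<String, String> region, Map<String, String> data) {
    try {
      region.putAll(data);
    } catch (ServerOperationException soe) {
      if (!(soe.getCause() instanceof PartitionOfflineException)) {
        throw soe; // not the partial-failure case this sketch handles
      }
      // Re-put only the entries that did not reach the servers.
      Map<String, String> remaining = new HashMap<>();
      for (Map.Entry<String, String> e : data.entrySet()) {
        if (!region.containsKeyOnServer(e.getKey())) {
          remaining.put(e.getKey(), e.getValue());
        }
      }
      if (!remaining.isEmpty()) {
        region.putAll(remaining);
      }
    }
  }
}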
use of org.apache.geode.cache.client.ServerOperationException in project geode by apache.
the class PutAllCSDUnitTest method testPartialKeyInPRSingleHop.
/**
* Tests partial key putAll to 2 PR servers; applying the data on the server side differs
* between PR and LR (PR does it in postPutAll). This is a single-hop putAll test.
*/
@Test
public void testPartialKeyInPRSingleHop() throws CacheException, InterruptedException {
final String title = "testPartialKeyInPRSingleHop_";
final int cacheWriterAllowedKeyNum = 16;
final Host host = Host.getHost(0);
final VM server1 = host.getVM(0);
final VM server2 = host.getVM(1);
final VM client1 = host.getVM(2);
final VM client2 = host.getVM(3);
final String regionName = getUniqueName();
final String serverHost = NetworkUtils.getServerHostName(server1.getHost());
// set <true, false> means <PR=true, notifyBySubscription=false> to test local-invalidates
final int serverPort1 = createBridgeServer(server1, regionName, 0, true, 0, "ds1");
final int serverPort2 = createBridgeServer(server2, regionName, 0, true, 0, "ds1");
createClient(client1, regionName, serverHost, new int[] { serverPort1, serverPort2 }, -1, -1, false, false, true, false);
createClient(client2, regionName, serverHost, new int[] { serverPort1, serverPort2 }, -1, -1, false, false, true);
server1.invoke(addExceptionTag1(expectedExceptions));
server2.invoke(addExceptionTag1(expectedExceptions));
client1.invoke(addExceptionTag1(expectedExceptions));
client2.invoke(addExceptionTag1(expectedExceptions));
try {
client2.invoke(new CacheSerializableRunnable(title + "client2 add listener") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(regionName);
region.getAttributesMutator().addCacheListener(new MyListener(false));
region.registerInterest("ALL_KEYS");
LogWriterUtils.getLogWriter().info("client2 registerInterest ALL_KEYS at " + region.getFullPath());
}
});
client1.invoke(new CacheSerializableRunnable(title + "do some putAll to get ClientMetaData for future putAll") {
@Override
public void run2() throws CacheException {
doPutAll(regionName, "key-", numberOfEntries);
}
});
WaitCriterion waitForSizes = new WaitCriterion() {
@Override
public String description() {
return "waiting for conditions to be met";
}
@Override
public boolean done() {
int c1Size = getRegionSize(client1, regionName);
int c2Size = getRegionSize(client2, regionName);
int s1Size = getRegionSize(server1, regionName);
int s2Size = getRegionSize(server2, regionName);
LogWriterUtils.getLogWriter().info("region sizes: " + c1Size + "," + c2Size + "," + s1Size + "," + s2Size);
if (c1Size != numberOfEntries) {
LogWriterUtils.getLogWriter().info("waiting for client1 to get all updates");
return false;
}
if (c2Size != numberOfEntries) {
LogWriterUtils.getLogWriter().info("waiting for client2 to get all updates");
return false;
}
if (s1Size != numberOfEntries) {
LogWriterUtils.getLogWriter().info("waiting for server1 to get all updates");
return false;
}
if (s2Size != numberOfEntries) {
LogWriterUtils.getLogWriter().info("waiting for server2 to get all updates");
return false;
}
return true;
}
};
Wait.waitForCriterion(waitForSizes, 10000, 1000, true);
server1.invoke(new CacheSerializableRunnable(title + "server1 add slow listener") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(regionName);
region.getAttributesMutator().addCacheListener(new MyListener(true));
}
});
// add a listener that will close the cache at the 10th update
final SharedCounter sc_server2 = new SharedCounter("server2");
server2.invoke(new CacheSerializableRunnable(title + "server2 add slow listener") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(regionName);
region.getAttributesMutator().addCacheListener(new MyListener(server2, true, sc_server2, 10));
}
});
AsyncInvocation async1 = client1.invokeAsync(new CacheSerializableRunnable(title + "client1 add listener and putAll") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(regionName);
region.getAttributesMutator().addCacheListener(new MyListener(false));
// create keys
try {
doPutAll(regionName, title, numberOfEntries);
fail("Expect ServerOperationException caused by PutAllParitialResultException");
} catch (ServerOperationException soe) {
assertTrue(soe.getMessage().contains(LocalizedStrings.Region_PutAll_Applied_PartialKeys_At_Server_0.toLocalizedString(region.getFullPath())));
}
}
});
// server2 will closeCache after creating 10 keys
ThreadUtils.join(async1, 30 * 1000);
if (async1.exceptionOccurred()) {
Assert.fail("putAll client threw an exception", async1.getException());
}
// restart server2
System.out.println("restarting server 2");
createBridgeServer(server2, regionName, serverPort2, true, 0, "ds1");
// Test Case 1: trigger single-hop putAll and stop server2 in the middle.
// numberOfEntries/2 + X keys will be created at the servers, i.e. X keys at server2 and
// numberOfEntries/2 keys at server1.
// The client should receive a PartialResultException due to PartitionOffline
// close a server to re-run the test
closeCache(server2);
client1.invoke(new CacheSerializableRunnable(title + "client1 does putAll again") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(regionName);
// create keys
try {
doPutAll(regionName, title + "again:", numberOfEntries);
fail("Expect ServerOperationException caused by PutAllParitialResultException");
} catch (ServerOperationException soe) {
assertTrue(soe.getMessage().contains(LocalizedStrings.Region_PutAll_Applied_PartialKeys_At_Server_0.toLocalizedString(region.getFullPath())));
}
}
});
// Test Case 2: based on case 1, but this time, there should be no X keys
// created on server2.
// restart server2
createBridgeServer(server2, regionName, serverPort2, true, 0, "ds1");
// add a cacheWriter so the server fails the putAll after it has created cacheWriterAllowedKeyNum keys
server1.invoke(new CacheSerializableRunnable(title + "server1 add cachewriter to throw exception after created some keys") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(regionName);
region.getAttributesMutator().setCacheWriter(new MyWriter(cacheWriterAllowedKeyNum));
}
});
client1.invoke(new CacheSerializableRunnable(title + "client1 does putAll once more") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(regionName);
// create keys
try {
doPutAll(regionName, title + "once more:", numberOfEntries);
fail("Expect ServerOperationException caused by PutAllParitialResultException");
} catch (ServerOperationException soe) {
assertTrue(soe.getMessage().contains(LocalizedStrings.Region_PutAll_Applied_PartialKeys_At_Server_0.toLocalizedString(region.getFullPath())));
}
}
});
} finally {
server1.invoke(removeExceptionTag1(expectedExceptions));
server2.invoke(removeExceptionTag1(expectedExceptions));
client1.invoke(removeExceptionTag1(expectedExceptions));
client2.invoke(removeExceptionTag1(expectedExceptions));
// Stop server
stopBridgeServers(getCache());
}
}
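The MyListener variant constructed as MyListener(server2, true, sc_server2, 10) (a listener that shuts server2 down after ten creates) is also not shown in this excerpt. A rough sketch of a listener with that effect might look like the following; the class name, the afterCreate trigger, and closing via the region's cache are assumptions, not the test's actual MyListener:

import java.util.concurrent.atomic.AtomicInteger;
import org.apache.geode.cache.EntryEvent;
import org.apache.geode.cache.util.CacheListenerAdapter;

// Hypothetical stand-in: close the hosting cache once a fixed number of entries
// have been created, so a putAll in flight produces a partial result on the client.
class CacheClosingListener extends CacheListenerAdapter {
  private final int closeAfter;
  private final AtomicInteger count = new AtomicInteger();

  CacheClosingListener(int closeAfter) {
    this.closeAfter = closeAfter;
  }

  @Override
  public void afterCreate(EntryEvent event) {
    if (count.incrementAndGet() == closeAfter) {
      // Closing the cache mid-putAll is what forces the partial result.
      event.getRegion().getCache().close();
    }
  }
}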
use of org.apache.geode.cache.client.ServerOperationException in project geode by apache.
the class GatewaySenderEventRemoteDispatcher method _dispatchBatch.
private boolean _dispatchBatch(List events, boolean isRetry) {
Exception ex = null;
int currentBatchId = this.processor.getBatchId();
connection = getConnection(true);
int batchIdForThisConnection = this.processor.getBatchId();
GatewaySenderStats statistics = this.sender.getStatistics();
// If the batchId changed while acquiring the connection, the connection has been reset
// (a reset also resets the batchId); report this batch as not dispatched.
if (currentBatchId != batchIdForThisConnection || this.processor.isConnectionReset()) {
return false;
}
try {
if (this.processor.isConnectionReset()) {
isRetry = true;
}
SenderProxy sp = new SenderProxy(this.sender.getProxy());
this.connectionLifeCycleLock.readLock().lock();
try {
if (connection != null) {
sp.dispatchBatch_NewWAN(connection, events, currentBatchId, sender.isRemoveFromQueueOnException(), isRetry);
if (logger.isDebugEnabled()) {
logger.debug("{} : Dispatched batch (id={}) of {} events, queue size: {} on connection {}", this.processor.getSender(), currentBatchId, events.size(), this.processor.getQueue().size(), connection);
}
} else {
throw new ConnectionDestroyedException();
}
} finally {
this.connectionLifeCycleLock.readLock().unlock();
}
return true;
} catch (ServerOperationException e) {
Throwable t = e.getCause();
if (t instanceof BatchException70) {
// A BatchException has occurred.
// Do not process the connection as dead since it is not dead.
ex = (BatchException70) t;
} else {
ex = e;
// keep using the connection if we had a batch exception. Else, destroy it
destroyConnection();
}
throw new GatewaySenderException(LocalizedStrings.GatewayEventRemoteDispatcher_0_EXCEPTION_DURING_PROCESSING_BATCH_1_ON_CONNECTION_2.toLocalizedString(new Object[] { this, Integer.valueOf(currentBatchId), connection }), ex);
} catch (GemFireIOException e) {
Throwable t = e.getCause();
if (t instanceof MessageTooLargeException) {
// A MessageTooLargeException has occurred.
// Do not process the connection as dead since it is not dead.
ex = (MessageTooLargeException) t;
// Reduce the batch size by half of the configured batch size or number of events in the
// current batch (whichever is less)
int newBatchSize = Math.min(events.size(), this.processor.getBatchSize()) / 2;
logger.warn(LocalizedMessage.create(LocalizedStrings.GatewaySenderEventRemoteDispatcher_MESSAGE_TOO_LARGE_EXCEPTION, new Object[] { events.size(), newBatchSize }), e);
this.processor.setBatchSize(newBatchSize);
statistics.incBatchesResized();
} else {
ex = e;
// keep using the connection if we had a MessageTooLargeException. Else, destroy it
destroyConnection();
}
throw new GatewaySenderException(LocalizedStrings.GatewayEventRemoteDispatcher_0_EXCEPTION_DURING_PROCESSING_BATCH_1_ON_CONNECTION_2.toLocalizedString(new Object[] { this, Integer.valueOf(currentBatchId), connection }), ex);
} catch (IllegalStateException e) {
this.processor.setException(new GatewaySenderException(e));
throw new GatewaySenderException(LocalizedStrings.GatewayEventRemoteDispatcher_0_EXCEPTION_DURING_PROCESSING_BATCH_1_ON_CONNECTION_2.toLocalizedString(new Object[] { this, Integer.valueOf(currentBatchId), connection }), e);
} catch (Exception e) {
// An Exception has occurred. Get its cause.
Throwable t = e.getCause();
if (t instanceof IOException) {
// An IOException has occurred.
ex = (IOException) t;
} else {
ex = e;
}
// the cause is not going to be BatchException70. So, destroy the connection
destroyConnection();
throw new GatewaySenderException(LocalizedStrings.GatewayEventRemoteDispatcher_0_EXCEPTION_DURING_PROCESSING_BATCH_1_ON_CONNECTION_2.toLocalizedString(new Object[] { this, Integer.valueOf(currentBatchId), connection }), ex);
}
}
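The batch-resize rule in the GemFireIOException branch can be read in isolation: halve the smaller of the configured batch size and the number of events in the failed batch. A standalone sketch of that rule follows; the floor of one event is an added safeguard, not part of the code above:

class BatchResizeSketch {
  // Mirrors the MessageTooLargeException handling above: shrink the batch so the
  // next dispatch attempt stays under the server's message size limit.
  static int resizedBatchSize(int configuredBatchSize, int eventsInFailedBatch) {
    int newBatchSize = Math.min(eventsInFailedBatch, configuredBatchSize) / 2;
    return Math.max(1, newBatchSize); // assumption: never let the batch size reach zero
  }
}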
use of org.apache.geode.cache.client.ServerOperationException in project geode by apache.
the class ClassNotFoundExceptionDUnitTest method doTest.
public void doTest(final ObjectFactory objectFactory) throws InterruptedException {
IgnoredException.addIgnoredException("SerializationException");
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
VM vm2 = host.getVM(2);
VM vm3 = host.getVM(3);
int port1 = createServerRegion(vm0);
int port2 = createServerRegion(vm1);
createClientRegion(vm2, port1);
createClientRegion(vm3, port2);
SerializableRunnable putKey = new SerializableRunnable() {
public void run() {
Region region = getCache().getRegion("testSimplePdx");
region.put("a", "b");
region.put("b", "b");
for (int i = 0; i < 10; i++) {
region.put(i, i);
}
if (!region.containsKey("test")) {
region.put("test", objectFactory.get());
}
try {
region.put(objectFactory.get(), objectFactory.get());
fail("Should have received an exception");
} catch (SerializationException expected) {
// ok
} catch (ServerOperationException expected) {
if (!(expected.getCause() instanceof SerializationException) && !(expected.getCause() instanceof ClassNotFoundException)) {
throw expected;
}
}
// try {
// region.replace("test", objectFactory.get(), objectFactory.get());
// fail("Should have received an exception");
// } catch(SerializationException expected) {
// //ok
// } catch(ServerOperationException expected) {
// if(!(expected.getCause() instanceof SerializationException) && !(expected.getCause()
// instanceof ClassNotFoundException)) {
// throw expected;
// }
// }
}
};
SerializableRunnable getValue = new SerializableRunnable() {
public void run() {
Region region = getCache().getRegion("testSimplePdx");
try {
assertNotNull(region.get("test"));
fail("Should have received an exception");
} catch (SerializationException expected) {
// ok
} catch (ServerOperationException expected) {
if (!(expected.getCause() instanceof SerializationException) && !(expected.getCause() instanceof ClassNotFoundException)) {
throw expected;
}
}
}
};
SerializableRunnable registerInterest = new SerializableRunnable() {
public void run() {
Region region = getCache().getRegion("testSimplePdx");
try {
ArrayList keys = new ArrayList();
for (int i = 0; i < 1000; i++) {
keys.add(i);
}
keys.add("test");
region.getAll(keys);
fail("Should have received an exception");
} catch (SerializationException expected) {
System.out.println("hi");
// ok
} catch (ServerOperationException expected) {
if (!(expected.getCause() instanceof SerializationException) && !(expected.getCause() instanceof ClassNotFoundException)) {
throw expected;
}
}
}
};
vm2.invoke(putKey);
vm1.invoke(getValue);
vm3.invoke(getValue);
vm3.invoke(registerInterest);
vm1.invoke(putKey);
}
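The ObjectFactory that doTest is parameterized with is defined elsewhere in the test class. A hedged sketch of the shape it needs is shown below: a serializable factory whose produced class exists only on the client JVMs, so servers fail to deserialize the value and the client sees a SerializationException or a ServerOperationException caused by ClassNotFoundException. The interface name matches the parameter type used above; everything else is an assumption:

import java.io.Serializable;

// Sketch only: the real test supplies its own factories. The essential property is
// that the produced value's class is on the client classpath but not on the servers.
interface ObjectFactory extends Serializable {
  Object get();
}

class ClientOnlyValue implements Serializable {
  private static final long serialVersionUID = 1L;
  // deployed only to the client JVMs; servers cannot resolve this class
}

class ClientOnlyValueFactory implements ObjectFactory {
  @Override
  public Object get() {
    return new ClientOnlyValue();
  }
}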