Use of org.apache.geode.cache.query.Struct in project geode by apache:
the class PrCqUsingPoolDUnitTest, method testEventsDuringQueryExecution.
/**
 * Test for events created during the CQ query execution. When CQs are executed using
 * executeWithInitialResults there may be possibility that the region changes during that time may
 * not be reflected in the query result set thus making the query data and region data
 * inconsistent.
 *
 * <p>Two servers host the partitioned region; a client registers a CQ, an async task keeps
 * putting entries while the client calls executeWithInitialResults, and the test then verifies
 * that the union of the initial result set and the queued CQ events covers every key, in order.
 *
 * @throws Exception if the CQ cannot be executed or events are lost/misordered
 */
// GEODE-1181, 1253: random ports, eats exceptions (fixed some), async behavior
@Category(FlakyTest.class)
@Test
public void testEventsDuringQueryExecution() throws Exception {
  final Host host = Host.getHost(0);
  VM server1 = host.getVM(0);
  VM server2 = host.getVM(1);
  VM client = host.getVM(2);
  final String cqName = "testEventsDuringQueryExecution_0";

  // Start the two cache servers.
  createServer(server1);
  createServer(server2);
  final int port = server1.invoke(() -> PrCqUsingPoolDUnitTest.getCacheServerPort());
  final String host0 = NetworkUtils.getServerHostName(server1.getHost());

  String poolName1 = "testEventsDuringQueryExecution";
  createPool(client, poolName1, host0, port);

  // Create (but do not yet execute) the CQ on the client.
  createCQ(client, poolName1, cqName, cqs[0]);

  final int numObjects = 200;
  final int totalObjects = 500;

  // Initialize region with the first numObjects entries before the CQ runs.
  server1.invoke(new CacheSerializableRunnable("Update Region") {
    @Override
    public void run2() throws CacheException {
      Region region = getCache().getRegion("/root/" + regions[0]);
      for (int i = 1; i <= numObjects; i++) {
        Portfolio p = new Portfolio(i);
        region.put("" + i, p);
      }
    }
  });

  // Keep updating region (async invocation) so the puts race with
  // executeWithInitialResults() on the client below.
  server1.invokeAsync(new CacheSerializableRunnable("Update Region") {
    @Override
    public void run2() throws CacheException {
      Region region = getCache().getRegion("/root/" + regions[0]);
      for (int i = numObjects + 1; i <= totalObjects; i++) {
        Portfolio p = new Portfolio(i);
        region.put("" + i, p);
      }
    }
  });

  // Execute CQ while the async update is in progress.
  client.invoke(new CacheSerializableRunnable("Execute CQ") {
    @Override
    public void run2() throws CacheException {
      QueryService cqService = getCache().getQueryService();
      // Get CqQuery object.
      CqQuery cq1 = cqService.getCq(cqName);
      if (cq1 == null) {
        fail("Failed to get CQ " + cqName);
      }

      SelectResults cqResults = null;
      try {
        cqResults = cq1.executeWithInitialResults();
      } catch (Exception ex) {
        throw new AssertionError("Failed to execute CQ " + cqName, ex);
      }

      CqQueryTestListener cqListener =
          (CqQueryTestListener) cq1.getCqAttributes().getCqListener();
      // Wait for the last key to arrive; event delivery is async, so retry
      // a few times before giving up and rethrowing.
      for (int i = 0; i < 4; i++) {
        try {
          cqListener.waitForCreated("" + totalObjects);
          // Found, skip from the loop.
          break;
        } catch (CacheException ex) {
          if (i == 3) {
            throw ex;
          }
        }
      }

      // Check if the events from CqListener are in order.
      int oldId = 0;
      for (Object cqEvent : cqListener.events.toArray()) {
        // Integer.parseInt avoids the deprecated new Integer(String) allocation.
        int newId = Integer.parseInt(cqEvent.toString());
        if (oldId > newId) {
          fail("Queued events for CQ Listener during execution with " + "Initial results is not in the order in which they are created.");
        }
        oldId = newId;
      }

      // Check if all the IDs are present as part of Select Results and CQ Events.
      HashSet ids = new HashSet(cqListener.events);
      for (Object o : cqResults.asList()) {
        Struct s = (Struct) o;
        ids.add(s.get("key"));
      }

      // Every key 1..totalObjects must be in either the result set or the events.
      HashSet missingIds = new HashSet();
      String key = "";
      for (int i = 1; i <= totalObjects; i++) {
        key = "" + i;
        if (!(ids.contains(key))) {
          missingIds.add(key);
        }
      }

      if (!missingIds.isEmpty()) {
        fail("Missing Keys in either ResultSet or the Cq Event list. " + " Missing keys : [size : " + missingIds.size() + "]" + missingIds + " Ids in ResultSet and CQ Events :" + ids);
      }
    }
  });

  cqHelper.closeClient(client);
  cqHelper.closeServer(server2);
  cqHelper.closeServer(server1);
}
Use of org.apache.geode.cache.query.Struct in project geode by apache:
the class CqQueryUsingPoolDUnitTest, method executeCQ.
/**
 * Executes/registers a previously created CQ in the given VM and asserts its state
 * transitions: the CQ must be stopped before execution and running afterwards.
 *
 * @param vm the DUnit VM in which the CQ is executed
 * @param cqName name of a CQ previously registered with the cache's QueryService
 * @param initialResults true if initialResults are requested (executeWithInitialResults)
 * @param expectedResultsSize if >= 0, validate results against this size
 * @param expectedKeys if not null, keys that must each appear in the "key" field of the
 *        initial result structs
 * @param expectedErr if not null, an error we expect; it is bracketed with
 *        ExpectedException add/remove log markers around the execution
 */
public void executeCQ(VM vm, final String cqName, final boolean initialResults, final int expectedResultsSize, final String[] expectedKeys, final String expectedErr) {
vm.invoke(new CacheSerializableRunnable("Execute CQ :" + cqName) {
// Looks up the CQ, executes it (with or without initial results) and
// validates state and, optionally, the initial result set.
private void work() throws CacheException {
LogWriterUtils.getLogWriter().info("### DEBUG EXECUTE CQ START ####");
// Get CQ Service.
QueryService cqService = null;
CqQuery cq1 = null;
cqService = getCache().getQueryService();
// Get CqQuery object.
try {
cq1 = cqService.getCq(cqName);
if (cq1 == null) {
LogWriterUtils.getLogWriter().info("Failed to get CqQuery object for CQ name: " + cqName);
fail("Failed to get CQ " + cqName);
} else {
LogWriterUtils.getLogWriter().info("Obtained CQ, CQ name: " + cq1.getName());
// A newly created (or stopped) CQ must not be running yet.
assertTrue("newCq() state mismatch", cq1.getState().isStopped());
}
} catch (Exception ex) {
LogWriterUtils.getLogWriter().info("CqService is :" + cqService);
LogWriterUtils.getLogWriter().error(ex);
Assert.fail("Failed to execute CQ " + cqName, ex);
}
if (initialResults) {
SelectResults cqResults = null;
try {
cqResults = cq1.executeWithInitialResults();
} catch (Exception ex) {
fail("Failed to execute CQ " + cqName, ex);
}
LogWriterUtils.getLogWriter().info("initial result size = " + cqResults.size());
assertTrue("executeWithInitialResults() state mismatch", cq1.getState().isRunning());
// Optional size validation of the initial result set.
if (expectedResultsSize >= 0) {
assertEquals("Unexpected results size for CQ: " + cqName + " CQ Query :" + cq1.getQueryString(), expectedResultsSize, cqResults.size());
}
// Optional key validation: each expected key must be present in the
// "key" field of the returned structs.
if (expectedKeys != null) {
HashSet resultKeys = new HashSet();
for (Object o : cqResults.asList()) {
Struct s = (Struct) o;
resultKeys.add(s.get("key"));
}
for (int i = 0; i < expectedKeys.length; i++) {
assertTrue("Expected key :" + expectedKeys[i] + " Not found in CqResults for CQ: " + cqName + " CQ Query :" + cq1.getQueryString() + " Keys in CqResults :" + resultKeys, resultKeys.contains(expectedKeys[i]));
}
}
} else {
try {
cq1.execute();
} catch (Exception ex) {
// Only log unexpected errors; expected ones were bracketed in run2().
if (expectedErr == null) {
LogWriterUtils.getLogWriter().info("CqService is :" + cqService, ex);
}
// NOTE(review): fail() is invoked even when expectedErr != null, so an
// expected error still fails this VM's invocation — presumably the
// ExpectedException markers only suppress log-based failures; confirm intended.
Assert.fail("Failed to execute CQ " + cqName, ex);
}
assertTrue("execute() state mismatch", cq1.getState().isRunning());
}
}
@Override
public void run2() throws CacheException {
// Bracket the execution with ExpectedException markers so expected
// server-side errors do not fail the suite's log scanning.
if (expectedErr != null) {
getCache().getLogger().info("<ExpectedException action=add>" + expectedErr + "</ExpectedException>");
}
try {
work();
} finally {
// Always remove the marker, even if work() threw.
if (expectedErr != null) {
getCache().getLogger().info("<ExpectedException action=remove>" + expectedErr + "</ExpectedException>");
}
}
}
});
}
use of org.apache.geode.cache.query.Struct in project geode by apache.
the class JsonWriter method writeValueAsJson.
/**
 * Serializes {@code value} to the generator as JSON, dispatching on its runtime class.
 * Known wrapper, number and string types are written natively; arrays, links and enum
 * infos have dedicated handling; Structs, PdxInstances, collections and maps are
 * delegated to the matching writeXxxAsJson helper; anything else falls back to
 * {@code writeObject}.
 */
public static void writeValueAsJson(JsonGenerator generator, Object value, String pdxField) throws JsonGenerationException, IOException {
  if (value == null) {
    generator.writeNull();
    return;
  }
  final Class<?> type = value.getClass();
  // Class does not override equals(), so == is exactly equivalent to equals() here.
  if (type == Boolean.class) {
    generator.writeBoolean((Boolean) value);
  } else if (type == Byte.class) {
    generator.writeNumber((Byte) value);
  } else if (type == Short.class) {
    generator.writeNumber((Short) value);
  } else if (type == Integer.class) {
    generator.writeNumber((int) (Integer) value);
  } else if (type == Long.class) {
    generator.writeNumber((long) (Long) value);
  } else if (type == BigInteger.class) {
    generator.writeNumber((BigInteger) value);
  } else if (type == Float.class) {
    generator.writeNumber((float) (Float) value);
  } else if (type == BigDecimal.class) {
    generator.writeNumber((BigDecimal) value);
  } else if (type == Double.class) {
    generator.writeNumber((double) (Double) value);
  } else if (type == String.class) {
    generator.writeString((String) value);
  } else if (type.isArray()) {
    writeArrayAsJson(generator, value, pdxField);
  } else if (type == Link.class) {
    writeLinkAsJson(generator, (Link) value, pdxField);
  } else if (type == EnumInfo.class || type == PdxInstanceEnumInfo.class) {
    // Both enum-info flavors render as their string form.
    generator.writeString(value.toString());
  } else if (value instanceof Struct) {
    writeStructAsJson(generator, (StructImpl) value);
  } else if (value instanceof PdxInstance) {
    writePdxInstanceAsJson(generator, (PdxInstance) value);
  } else if (value instanceof Collection) {
    writeCollectionAsJson(generator, (Collection<?>) value);
  } else if (value instanceof Map) {
    writeMapAsJson(generator, (Map) value, pdxField);
  } else {
    generator.writeObject(value);
  }
}
use of org.apache.geode.cache.query.Struct in project geode by apache.
the class TypedJson method visitSpecialObjects.
/**
 * Handles the "special" container-like object kinds: arrays, enums, nested TypedJson,
 * collections, maps, PdxInstances, query Structs and Region entries.
 *
 * <p>In write mode the object is serialized to {@code w}; otherwise the object's child
 * elements are collected and returned so the caller can recurse into them.
 *
 * @param w destination writer (used only when {@code write} is true)
 * @param object the non-null object to inspect
 * @param write true to emit JSON, false to only collect children
 * @return the child elements discovered; empty when writing, or when the object has
 *         no children, or when it matched none of the special kinds
 * @throws IOException if the writer fails
 */
List<Object> visitSpecialObjects(Writer w, Object object, boolean write) throws IOException {
  List<Object> elements = new ArrayList<Object>();
  Class clazz = object.getClass();

  if (clazz.isArray()) {
    if (write) {
      writeArray(w, object);
      // Arrays match none of the checks below; return here instead of falling
      // through them (the original fell through to the same empty result).
      return elements;
    }
    return getArrayChildren(object);
  }

  if (clazz.isEnum()) {
    if (write) {
      writeEnum(w, object);
    } else {
      elements.add(object);
    }
    return elements;
  }

  if (object instanceof TypedJson) {
    this.writeTypedJson(w, (TypedJson) object);
    return elements;
  }

  if (object instanceof Collection) {
    Collection collection = (Collection) object;
    Iterator iter = collection.iterator();
    int i = 0;
    if (write) {
      w.write('{');
    }
    // Emit/collect at most queryCollectionsDepth elements.
    while (iter.hasNext() && i < queryCollectionsDepth) {
      Object item = iter.next();
      if (write) {
        writeKeyValue(w, i, item, item != null ? item.getClass() : null);
      } else {
        elements.add(item);
      }
      i++;
    }
    if (write) {
      w.write('}');
    }
    return elements;
  }

  if (object instanceof Map) {
    Map map = (Map) object;
    Iterator it = map.entrySet().iterator();
    int i = 0;
    if (write) {
      w.write('{');
    }
    // Same truncation policy as collections: at most queryCollectionsDepth entries.
    while (it.hasNext() && i < queryCollectionsDepth) {
      Map.Entry e = (Map.Entry) it.next();
      Object value = e.getValue();
      if (write) {
        writeKeyValue(w, e.getKey(), value, value != null ? value.getClass() : null);
      } else {
        elements.add(value);
      }
      i++;
    }
    if (write) {
      w.write('}');
    }
    return elements;
  }

  if (object instanceof PdxInstance) {
    PdxInstance pdxInstance = (PdxInstance) object;
    if (write) {
      w.write('{');
    }
    for (String field : pdxInstance.getFieldNames()) {
      Object fieldValue = pdxInstance.getField(field);
      if (write) {
        writeKeyValue(w, field, fieldValue, fieldValue != null ? fieldValue.getClass() : null);
      } else {
        elements.add(fieldValue);
      }
    }
    if (write) {
      w.write('}');
    }
    return elements;
  }

  if (object instanceof Struct) {
    StructImpl impl = (StructImpl) object;
    String[] fields = impl.getFieldNames();
    Object[] values = impl.getFieldValues();
    if (write) {
      w.write('{');
    }
    for (int i = 0; i < fields.length; i++) {
      Object fieldValue = values[i];
      if (write) {
        writeKeyValue(w, fields[i], fieldValue, fieldValue != null ? fieldValue.getClass() : null);
      } else {
        elements.add(fieldValue);
      }
    }
    if (write) {
      w.write('}');
    }
    return elements;
  }

  if (object instanceof Region.Entry) {
    Region.Entry entry = (Region.Entry) object;
    Object key = entry.getKey();
    Object value = entry.getValue();
    if (write) {
      w.write('{');
      writeKeyValue(w, key, value, value != null ? value.getClass() : null);
      w.write('}');
    } else {
      elements.add(value);
    }
    return elements;
  }

  // Not a special kind: nothing written, no children.
  return elements;
}
use of org.apache.geode.cache.query.Struct in project geode by apache.
the class ZRemRangeByScoreExecutor method executeCommand.
@Override
// Implements the Redis ZREMRANGEBYSCORE command: removes all members of the
// sorted set at `key` whose score falls in [start, stop] (either bound may be
// exclusive via the "(" prefix), replying with the number of members removed.
public void executeCommand(Command command, ExecutionHandlerContext context) {
List<byte[]> commandElems = command.getProcessedCommand();
// Arity check: ZREMRANGEBYSCORE key min max.
if (commandElems.size() < 4) {
command.setResponse(Coder.getErrorResponse(context.getByteBufAllocator(), ArityDef.ZREMRANGEBYSCORE));
return;
}
ByteArrayWrapper key = command.getKey();
checkDataType(key, RedisDataType.REDIS_SORTEDSET, context);
Region<ByteArrayWrapper, DoubleWrapper> keyRegion = getRegion(context, key);
// Missing key: Redis returns 0 removed.
if (keyRegion == null) {
command.setResponse(Coder.getIntegerResponse(context.getByteBufAllocator(), NOT_EXISTS));
return;
}
boolean startInclusive = true;
boolean stopInclusive = true;
double start;
double stop;
byte[] startArray = commandElems.get(2);
byte[] stopArray = commandElems.get(3);
String startString = Coder.bytesToString(startArray);
String stopString = Coder.bytesToString(stopArray);
// A leading "(" marks an exclusive bound; strip it before parsing.
if (startArray[0] == Coder.OPEN_BRACE_ID) {
startString = startString.substring(1);
startInclusive = false;
}
if (stopArray[0] == Coder.OPEN_BRACE_ID) {
stopString = stopString.substring(1);
stopInclusive = false;
}
try {
start = Coder.stringToDouble(startString);
stop = Coder.stringToDouble(stopString);
} catch (NumberFormatException e) {
command.setResponse(Coder.getErrorResponse(context.getByteBufAllocator(), ERROR_NOT_NUMERIC));
return;
}
int numRemoved = 0;
Collection<?> removeList = null;
try {
// Fast path: -inf..+inf inclusive removes everything, so drop the whole key.
if (start == Double.NEGATIVE_INFINITY && stop == Double.POSITIVE_INFINITY && startInclusive && stopInclusive) {
numRemoved = keyRegion.size();
context.getRegionProvider().removeKey(key);
} else {
// Otherwise query for the members in range and remove them individually.
removeList = getKeys(context, key, keyRegion, start, stop, startInclusive, stopInclusive);
}
} catch (Exception e) {
throw new RuntimeException(e);
}
if (removeList != null) {
for (Object entry : removeList) {
ByteArrayWrapper remove = null;
// getKeys may yield Map.Entry or query Struct results depending on the query path.
if (entry instanceof Entry)
remove = (ByteArrayWrapper) ((Entry<?, ?>) entry).getKey();
else if (entry instanceof Struct)
remove = (ByteArrayWrapper) ((Struct) entry).getFieldValues()[0];
// NOTE(review): if an element is neither Entry nor Struct, `remove` stays
// null and keyRegion.remove(null) is called — presumably unreachable given
// getKeys' result shapes, but confirm.
Object oldVal = keyRegion.remove(remove);
if (oldVal != null)
numRemoved++;
// NOTE(review): isEmpty()/removeKey runs on every iteration rather than once
// after the loop — looks redundant but preserved; verify intent.
if (keyRegion.isEmpty())
context.getRegionProvider().removeKey(key);
}
}
// Reply with the count of removed members, per the Redis protocol.
command.setResponse(Coder.getIntegerResponse(context.getByteBufAllocator(), numRemoved));
}
Aggregations