use of org.apache.geode.internal.cache.persistence.query.CloseableIterator in project geode by apache.
the class PdxStringQueryDUnitTest method testRepliacatedRegionCompactRangeIndex.
@Test
public void testRepliacatedRegionCompactRangeIndex() throws CacheException {
final Host host = Host.getHost(0);
VM server0 = host.getVM(0);
VM server1 = host.getVM(1);
VM server2 = host.getVM(2);
VM client = host.getVM(3);
final int numberOfEntries = 10;
// Start server1 and create index
server0.invoke(new CacheSerializableRunnable("Create Server1") {
public void run2() throws CacheException {
configAndStartBridgeServer(false, false, false);
// create a local query service
QueryService localQueryService = null;
try {
localQueryService = getCache().getQueryService();
} catch (Exception e) {
Assert.fail("Failed to get QueryService.", e);
}
// Verify the type of index created
Index index = null;
try {
index = localQueryService.createIndex("statusIndex", "status", regName);
if (!(index instanceof CompactRangeIndex)) {
fail("CompactRange Index should have been created instead of " + index.getClass());
}
} catch (Exception ex) {
fail("Failed to create index." + ex.getMessage());
}
}
});
// Start server2
server1.invoke(new CacheSerializableRunnable("Create Server2") {
public void run2() throws CacheException {
configAndStartBridgeServer(false, false, false);
Region region = getRootRegion().getSubregion(regionName);
}
});
// Start server3
server2.invoke(new CacheSerializableRunnable("Create Server3") {
public void run2() throws CacheException {
configAndStartBridgeServer(false, false, false);
Region region = getRootRegion().getSubregion(regionName);
}
});
// Client pool.
final int port0 = server0.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
final int port1 = server1.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
final int port2 = server2.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
final String host0 = NetworkUtils.getServerHostName(server0.getHost());
// Create client pool.
final String poolName = "testClientServerQueryPool";
createPool(client, poolName, new String[] { host0 }, new int[] { port0, port1, port2 }, true);
// Create client region and put PortfolioPdx objects (PdxInstances)
client.invoke(new CacheSerializableRunnable("Create client") {
public void run2() throws CacheException {
AttributesFactory factory = new AttributesFactory();
factory.setScope(Scope.LOCAL);
ClientServerTestCase.configureConnectionPool(factory, host0, port1, -1, true, -1, -1, null);
Region region = createRegion(regionName, rootRegionName, factory.create());
LogWriterUtils.getLogWriter().info("Put PortfolioPdx");
for (int i = 0; i < numberOfEntries; i++) {
region.put("key-" + i, new PortfolioPdx(i));
}
}
});
// Verify if all the index keys are PdxStrings
server0.invoke(new CacheSerializableRunnable("Create Server") {
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(regionName);
QueryService localQueryService = getCache().getQueryService();
Index index = localQueryService.getIndex(region, "statusIndex");
CloseableIterator<IndexStoreEntry> iter = ((CompactRangeIndex) index).getIndexStorage().iterator(null);
while (iter.hasNext()) {
Object key = iter.next().getDeserializedKey();
if (!(key instanceof PdxString)) {
fail("All keys of the CompactRangeIndex should be PdxStrings and not " + key.getClass());
}
}
}
});
// Execute queries from client to server and locally on client
SerializableRunnable executeQueries = new CacheSerializableRunnable("Execute queries") {
public void run2() throws CacheException {
QueryService remoteQueryService = null;
QueryService localQueryService = null;
SelectResults[][] rs = new SelectResults[1][2];
try {
remoteQueryService = (PoolManager.find(poolName)).getQueryService();
localQueryService = getCache().getQueryService();
} catch (Exception e) {
Assert.fail("Failed to get QueryService.", e);
}
for (int i = 0; i < queryString.length; i++) {
try {
LogWriterUtils.getLogWriter().info("### Executing Query on remote server:" + queryString[i]);
Query query = remoteQueryService.newQuery(queryString[i]);
rs[0][0] = (SelectResults) query.execute();
LogWriterUtils.getLogWriter().info("RR remote indexType: CompactRange size of resultset: " + rs[0][0].size() + " for query: " + queryString[i]);
checkForPdxString(rs[0][0].asList(), queryString[i]);
LogWriterUtils.getLogWriter().info("### Executing Query locally on client:" + queryString[i]);
query = localQueryService.newQuery(queryString[i]);
rs[0][1] = (SelectResults) query.execute();
LogWriterUtils.getLogWriter().info("RR client local indexType: CompactRange size of resultset: " + rs[0][1].size() + " for query: " + queryString[i]);
checkForPdxString(rs[0][1].asList(), queryString[i]);
if (i < orderByQueryIndex) {
// Compare local and remote query results.
if (!compareResultsOfWithAndWithoutIndex(rs)) {
fail("Local and Remote Query Results are not matching for query :" + queryString[i]);
}
} else {
// compare the order of results returned
compareResultsOrder(rs, false);
}
} catch (Exception e) {
Assert.fail("Failed executing " + queryString[i], e);
}
}
}
};
client.invoke(executeQueries);
// Put non-PDX objects on the server and execute queries locally
server0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(regionName);
LogWriterUtils.getLogWriter().info("Put Objects locally on server");
for (int i = numberOfEntries; i < numberOfEntries * 2; i++) {
region.put("key-" + i, new Portfolio(i));
}
QueryService localQueryService = getCache().getQueryService();
// Query server1 locally to check if PdxString is not being returned
for (int i = 0; i < queryString.length; i++) {
try {
LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
SelectResults rs = (SelectResults) localQueryService.newQuery(queryString[i]).execute();
LogWriterUtils.getLogWriter().info("RR server local indexType:Range size of resultset: " + rs.size() + " for query: " + queryString[i]);
// The results should not be PdxString
checkForPdxString(rs.asList(), queryString[i]);
} catch (Exception e) {
Assert.fail("Failed executing " + queryString[i], e);
}
}
}
});
// test for readSerialized flag
server0.invoke(new CacheSerializableRunnable("Create Bridge Server") {
public void run2() throws CacheException {
GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
cache.setReadSerialized(true);
QueryService localQueryService = getCache().getQueryService();
// Query server1 locally to check if PdxString is not being returned
for (int i = 0; i < queryString.length; i++) {
try {
LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
SelectResults rs = (SelectResults) localQueryService.newQuery(queryString[i]).execute();
LogWriterUtils.getLogWriter().info("RR server local readSerializedTrue: indexType: CompactRange size of resultset: " + rs.size() + " for query: " + queryString[i]);
// The results should not be PdxString
checkForPdxString(rs.asList(), queryString[i]);
} catch (Exception e) {
Assert.fail("Failed executing " + queryString[i], e);
}
}
}
});
// test for readSerialized flag on client
client.invoke(new CacheSerializableRunnable("Create client") {
public void run2() throws CacheException {
GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
cache.setReadSerialized(true);
QueryService remoteQueryService = (PoolManager.find(poolName)).getQueryService();
// Query server1 remotely to check if PdxString is not being returned
for (int i = 0; i < queryString.length; i++) {
try {
LogWriterUtils.getLogWriter().info("### Executing Query locally on server:" + queryString[i]);
SelectResults rs = (SelectResults) remoteQueryService.newQuery(queryString[i]).execute();
LogWriterUtils.getLogWriter().info("RR server remote readSerializedTrue: indexType:CompactRange size of resultset: " + rs.size() + " for query: " + queryString[i]);
// The results should not be PdxString
checkForPdxString(rs.asList(), queryString[i]);
} catch (Exception e) {
Assert.fail("Failed executing " + queryString[i], e);
}
}
}
});
closeClient(server2);
closeClient(client);
closeClient(server1);
closeClient(server0);
}
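The server-side verification in the test above walks the CompactRangeIndex storage with a CloseableIterator but never closes it. As a minimal sketch, assuming the same index, IndexStoreEntry, PdxString, and fail helpers that the test already uses, the same key-type check with an explicit close would look like this:
CloseableIterator<IndexStoreEntry> keys = null;
try {
  keys = ((CompactRangeIndex) index).getIndexStorage().iterator(null);
  while (keys.hasNext()) {
    Object key = keys.next().getDeserializedKey();
    if (!(key instanceof PdxString)) {
      fail("All keys of the CompactRangeIndex should be PdxStrings and not " + key.getClass());
    }
  }
} finally {
  if (keys != null) {
    // Release the resources held by the index storage iterator.
    keys.close();
  }
}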
use of org.apache.geode.internal.cache.persistence.query.CloseableIterator in project geode by apache.
the class PdxStringQueryDUnitTest method testNullPdxString.
@Test
public void testNullPdxString() throws CacheException {
final Host host = Host.getHost(0);
VM server0 = host.getVM(0);
VM server1 = host.getVM(1);
VM server2 = host.getVM(2);
VM client = host.getVM(3);
final int numberOfEntries = 10;
final boolean isPr = true;
// Start server1 and create index
server0.invoke(new CacheSerializableRunnable("Create Server1") {
public void run2() throws CacheException {
configAndStartBridgeServer(isPr, false, false);
// create a local query service
QueryService localQueryService = null;
try {
localQueryService = getCache().getQueryService();
} catch (Exception e) {
Assert.fail("Failed to get QueryService.", e);
}
// Verify the type of index created
Index index = null;
try {
index = localQueryService.createIndex("statusIndex", "status", regName);
if (index instanceof PartitionedIndex) {
for (Object o : ((PartitionedIndex) index).getBucketIndexes()) {
if (!(o instanceof CompactRangeIndex)) {
fail("CompactRangeIndex Index should have been created instead of " + index.getClass());
}
}
} else {
fail("Partitioned index expected");
}
} catch (Exception ex) {
fail("Failed to create index." + ex.getMessage());
}
}
});
// Start server2
server1.invoke(new CacheSerializableRunnable("Create Server2") {
public void run2() throws CacheException {
configAndStartBridgeServer(isPr, false, false);
Region region = getRootRegion().getSubregion(regionName);
}
});
// Start server3
server2.invoke(new CacheSerializableRunnable("Create Server3") {
public void run2() throws CacheException {
configAndStartBridgeServer(isPr, false, false);
Region region = getRootRegion().getSubregion(regionName);
}
});
// Client pool.
final int port0 = server0.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
final int port1 = server1.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
final int port2 = server2.invoke(() -> PdxStringQueryDUnitTest.getCacheServerPort());
final String host0 = NetworkUtils.getServerHostName(server0.getHost());
// Create client pool.
final String poolName = "testClientServerQueryPool";
createPool(client, poolName, new String[] { host0 }, new int[] { port0, port1, port2 }, true);
// Create client region and put PortfolioPdx objects (PdxInstances)
client.invoke(new CacheSerializableRunnable("Create client") {
public void run2() throws CacheException {
AttributesFactory factory = new AttributesFactory();
factory.setScope(Scope.LOCAL);
ClientServerTestCase.configureConnectionPool(factory, host0, port1, -1, true, -1, -1, null);
Region region = createRegion(regionName, rootRegionName, factory.create());
LogWriterUtils.getLogWriter().info("Put PortfolioPdx");
// Put some PortfolioPdx objects with null Status and secIds
for (int i = 0; i < numberOfEntries * 2; i++) {
PortfolioPdx portfolioPdx = new PortfolioPdx(i);
// this will create NULL PdxStrings
portfolioPdx.status = null;
portfolioPdx.positions = new HashMap();
portfolioPdx.positions.put(null, new PositionPdx(null, PositionPdx.cnt * 1000));
region.put("key-" + i, portfolioPdx);
}
// Put some PortfolioPdx with non null status to reproduce bug#45351
for (int i = 0; i < numberOfEntries; i++) {
PortfolioPdx portfolioPdx = new PortfolioPdx(i);
region.put("key-" + i, portfolioPdx);
}
}
});
// Verify if all the index keys are PdxStrings
server0.invoke(new CacheSerializableRunnable("Create Server") {
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(regionName);
QueryService localQueryService = getCache().getQueryService();
Index index = localQueryService.getIndex(region, "statusIndex");
if (index instanceof PartitionedIndex) {
for (Object o : ((PartitionedIndex) index).getBucketIndexes()) {
CloseableIterator<IndexStoreEntry> iter = ((CompactRangeIndex) o).getIndexStorage().iterator(null);
while (iter.hasNext()) {
Object key = iter.next().getDeserializedKey();
if (!(key instanceof PdxString) && !(key == IndexManager.NULL)) {
fail("All keys of the CompactRangeIndex in the Partitioned index should be PdxStrings and not " + key.getClass());
}
}
}
} else {
fail("Partitioned index expected");
}
}
});
// Execute queries from client to server and locally on client
client.invoke(new CacheSerializableRunnable("Execute queries") {
public void run2() throws CacheException {
QueryService remoteQueryService = null;
QueryService localQueryService = null;
SelectResults[][] rs = new SelectResults[1][2];
try {
remoteQueryService = (PoolManager.find(poolName)).getQueryService();
localQueryService = getCache().getQueryService();
} catch (Exception e) {
Assert.fail("Failed to get QueryService.", e);
}
// Querying the fields with null values
String[] qs = { "SELECT pos.secId FROM " + regName + " p, p.positions.values pos where p.status = null", "SELECT p.pkid FROM " + regName + " p, p.positions.values pos where pos.secId = null" };
for (int i = 0; i < 2; i++) {
try {
Query query = remoteQueryService.newQuery(qs[i]);
SelectResults res = (SelectResults) query.execute();
LogWriterUtils.getLogWriter().info("PR NULL Pdxstring test size of resultset: " + res.size() + " for query: " + qs[i]);
if (i == 0) {
for (Object o : res) {
if (o != null) {
fail("Query : " + qs[i] + " should have returned null and not " + o);
}
}
} else {
checkForPdxString(res.asList(), qs[i]);
}
} catch (Exception e) {
Assert.fail("Failed executing " + qs[i], e);
}
}
}
});
closeClient(server2);
closeClient(client);
closeClient(server1);
closeClient(server0);
}
use of org.apache.geode.internal.cache.persistence.query.CloseableIterator in project geode by apache.
the class CompactRangeIndexJUnitTest method testMemoryIndexStoreMaintenanceTransitionFromElemArrayToTokenToConcurrentHashSet.
/**
* Tests a race condition when we are transitioning the index collection from an elem array to a
* concurrent hash set. The other thread could remove from the empty concurrent hash set. Instead we
* now set a token, do all the puts into a new collection, and then unset the token by swapping in
* that collection.
*/
@Test
public void testMemoryIndexStoreMaintenanceTransitionFromElemArrayToTokenToConcurrentHashSet() throws Exception {
try {
index = utils.createIndex("compact range index", "p.status", "/exampleRegion p");
final Region r = utils.getCache().getRegion("/exampleRegion");
Portfolio p0 = new Portfolio(0);
p0.status = "active";
Portfolio p1 = new Portfolio(1);
p1.status = "active";
final Portfolio p2 = new Portfolio(2);
p2.status = "active";
Portfolio p3 = new Portfolio(3);
p3.status = "active";
r.put("0", p0);
r.put("1", p1);
r.put("3", p3);
// now we set the test hook. That way previous calls would not affect the test hooks
DefaultQuery.testHook = new MemoryIndexStoreIndexElemToTokenToConcurrentHashSetTestHook();
final CountDownLatch threadsDone = new CountDownLatch(2);
Thread t2 = new Thread(new Runnable() {
public void run() {
r.put("2", p2);
threadsDone.countDown();
}
});
t2.start();
Thread t0 = new Thread(new Runnable() {
public void run() {
r.remove("0");
threadsDone.countDown();
}
});
t0.start();
threadsDone.await(90, TimeUnit.SECONDS);
QueryService qs = utils.getCache().getQueryService();
SelectResults results = (SelectResults) qs.newQuery("Select * from /exampleRegion r where r.status='active'").execute();
// the remove should have happened
assertEquals(3, results.size());
results = (SelectResults) qs.newQuery("Select * from /exampleRegion r where r.status!='inactive'").execute();
assertEquals(3, results.size());
CompactRangeIndex cindex = (CompactRangeIndex) index;
MemoryIndexStore indexStore = (MemoryIndexStore) cindex.getIndexStorage();
CloseableIterator iterator = indexStore.get("active");
int count = 0;
while (iterator.hasNext()) {
count++;
iterator.next();
}
assertEquals("incorrect number of entries in collection", 3, count);
} finally {
DefaultQuery.testHook = null;
System.setProperty("index_elemarray_threshold", "100");
}
}
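The test hook above forces the elem-array-to-token-to-concurrent-hash-set transition that the javadoc describes. The following is a hypothetical, simplified sketch of that pattern, not Geode's actual implementation; the AtomicReference slot and class name are assumptions standing in for the real index entry field. Concurrent removers that observe the token back off and retry rather than removing from an empty, half-built set.
import java.util.Set;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicReference;

class ElemArrayToSetTransitionSketch {
  // Sentinel published while the new collection is being built.
  private static final Object TRANSITIONING_TOKEN = new Object();

  void transition(AtomicReference<Object> slot, Object[] elemArray, Object newEntry) {
    // 1. Publish the token so a concurrent remove backs off and retries instead of
    // removing from an empty, half-populated set.
    slot.set(TRANSITIONING_TOKEN);
    // 2. Build the concurrent collection off to the side with every existing entry.
    Set<Object> values = ConcurrentHashMap.newKeySet();
    for (Object existing : elemArray) {
      values.add(existing);
    }
    values.add(newEntry);
    // 3. Unset the token by swapping in the fully populated collection.
    slot.set(values);
  }
}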
use of org.apache.geode.internal.cache.persistence.query.CloseableIterator in project geode by apache.
the class CompactRangeIndexJUnitTest method testCompactRangeIndexMemoryIndexStoreMaintenance.
/**
* Tests a race condition where remove calls could be missed because we transitioned to an empty
* index elem before adding the entries. The fix is to add the entries to the elem first and then
* transition to that elem.
*/
@Test
public void testCompactRangeIndexMemoryIndexStoreMaintenance() throws Exception {
try {
index = utils.createIndex("compact range index", "p.status", "/exampleRegion p");
final Region r = utils.getCache().getRegion("/exampleRegion");
Portfolio p0 = new Portfolio(0);
p0.status = "active";
final Portfolio p1 = new Portfolio(1);
p1.status = "active";
r.put("0", p0);
DefaultQuery.testHook = new MemoryIndexStoreREToIndexElemTestHook();
final CountDownLatch threadsDone = new CountDownLatch(2);
Thread t1 = new Thread(new Runnable() {
public void run() {
r.put("1", p1);
threadsDone.countDown();
}
});
t1.start();
Thread t0 = new Thread(new Runnable() {
public void run() {
r.remove("0");
threadsDone.countDown();
}
});
t0.start();
threadsDone.await(90, TimeUnit.SECONDS);
QueryService qs = utils.getCache().getQueryService();
SelectResults results = (SelectResults) qs.newQuery("Select * from /exampleRegion r where r.status='active'").execute();
// the remove should have happened
assertEquals(1, results.size());
results = (SelectResults) qs.newQuery("Select * from /exampleRegion r where r.status!='inactive'").execute();
assertEquals(1, results.size());
CompactRangeIndex cindex = (CompactRangeIndex) index;
MemoryIndexStore indexStore = (MemoryIndexStore) cindex.getIndexStorage();
CloseableIterator iterator = indexStore.get("active");
int count = 0;
while (iterator.hasNext()) {
count++;
iterator.next();
}
assertEquals("incorrect number of entries in collection", 1, count);
} finally {
DefaultQuery.testHook = null;
}
}
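The race in this second test is avoided by the ordering the javadoc describes: the index elem is fully populated before it is published. A deliberately small, hypothetical sketch of that ordering, again using an assumed AtomicReference slot rather than Geode's real data structures:
import java.util.concurrent.atomic.AtomicReference;

class PopulateElemBeforePublishSketch {
  void addSecondMapping(AtomicReference<Object> slot, Object existingEntry, Object newEntry) {
    // Build the elem with both entries first...
    Object[] populatedElem = new Object[] { existingEntry, newEntry };
    // ...and only then publish it, so a concurrent remove never observes an empty
    // elem and silently misses its entry.
    slot.set(populatedElem);
  }
}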
use of org.apache.geode.internal.cache.persistence.query.CloseableIterator in project geode by apache.
the class CompactRangeIndex method queryEquijoinCondition.
public List queryEquijoinCondition(IndexProtocol indx, ExecutionContext context) throws TypeMismatchException, FunctionDomainException, NameResolutionException, QueryInvocationTargetException {
// get a read lock when doing a lookup
long start = updateIndexUseStats();
((AbstractIndex) indx).updateIndexUseStats();
List data = new ArrayList();
CloseableIterator<IndexStoreEntry> outer = null;
Iterator inner = null;
try {
// We will iterate over each of the index Map to obtain the keys
outer = indexStore.iterator(null);
if (indx instanceof CompactRangeIndex) {
inner = ((CompactRangeIndex) indx).getIndexStorage().iterator(null);
} else {
inner = ((RangeIndex) indx).getValueToEntriesMap().entrySet().iterator();
}
IndexStoreEntry outerEntry = null;
Object innerEntry = null;
Object outerKey = null;
Object innerKey = null;
boolean incrementInner = true;
outer: while (outer.hasNext()) {
outerEntry = outer.next();
outerKey = outerEntry.getDeserializedKey();
// TODO: eliminate all labels
inner: while (!incrementInner || inner.hasNext()) {
if (incrementInner) {
innerEntry = inner.next();
if (innerEntry instanceof IndexStoreEntry) {
innerKey = ((IndexStoreEntry) innerEntry).getDeserializedKey();
} else {
innerKey = ((Map.Entry) innerEntry).getKey();
}
}
int compare = ((Comparable) outerKey).compareTo(innerKey);
if (compare == 0) {
Object innerValue = null;
CloseableIterator<IndexStoreEntry> iter = null;
try {
if (innerEntry instanceof IndexStoreEntry) {
innerValue = ((CompactRangeIndex) indx).getIndexStorage().get(outerKey);
} else {
innerValue = ((Map.Entry) innerEntry).getValue();
}
iter = indexStore.get(outerKey);
populateListForEquiJoin(data, iter, innerValue, context, innerKey);
} finally {
if (iter != null) {
iter.close();
}
if (innerValue != null && innerValue instanceof CloseableIterator) {
((CloseableIterator<IndexStoreEntry>) innerValue).close();
}
}
incrementInner = true;
continue outer;
} else if (compare < 0) {
// The outer key is smaller than the inner key. That means that we need
// to increment the outer loop without moving the inner loop.
// incrementOuter = true;
incrementInner = false;
continue outer;
} else {
// The outer key is greater than the inner key, so increment the
// inner loop without changing the outer.
incrementInner = true;
}
}
break;
}
return data;
} finally {
((AbstractIndex) indx).updateIndexUseEndStats(start);
updateIndexUseEndStats(start);
if (outer != null) {
outer.close();
}
if (inner != null && indx instanceof CompactRangeIndex) {
((CloseableIterator<IndexStoreEntry>) inner).close();
}
}
}
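The nested loop in queryEquijoinCondition is a sort-merge equijoin over two key-ordered iterators, with the incrementInner flag deciding which cursor advances. A simplified, self-contained sketch of the same idea on plain Iterators (a hypothetical helper, not part of CompactRangeIndex) is:
import java.util.ArrayList;
import java.util.Iterator;
import java.util.List;

class SortMergeEquijoinSketch {
  // Both iterators must yield keys in ascending order, as the index storage does.
  static <K extends Comparable<K>> List<K> joinKeys(Iterator<K> outer, Iterator<K> inner) {
    List<K> matched = new ArrayList<>();
    K innerKey = inner.hasNext() ? inner.next() : null;
    while (outer.hasNext() && innerKey != null) {
      K outerKey = outer.next();
      // Advance the inner cursor only while its key is smaller than the outer key,
      // mirroring the incrementInner flag in the method above.
      while (innerKey != null && innerKey.compareTo(outerKey) < 0) {
        innerKey = inner.hasNext() ? inner.next() : null;
      }
      if (innerKey != null && innerKey.compareTo(outerKey) == 0) {
        // The real method collects the joined values here via populateListForEquiJoin.
        matched.add(outerKey);
      }
    }
    return matched;
  }
}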