use of org.apache.geode.internal.cache.HARegion in project geode by apache.
the class GemFireMemberStatus method initializeRegionSizes.
protected void initializeRegionSizes() {
  Iterator rootRegions = cache.rootRegions().iterator();
  while (rootRegions.hasNext()) {
    LocalRegion rootRegion = (LocalRegion) rootRegions.next();
    // Skip Geode's internal HA queue regions; only user regions are reported.
    if (!(rootRegion instanceof HARegion)) {
      RegionStatus rootRegionStatus = rootRegion instanceof PartitionedRegion
          ? new PartitionedRegionStatus((PartitionedRegion) rootRegion)
          : new RegionStatus(rootRegion);
      putRegionStatus(rootRegion.getFullPath(), rootRegionStatus);
      Iterator subRegions = rootRegion.subregions(true).iterator();
      while (subRegions.hasNext()) {
        LocalRegion subRegion = (LocalRegion) subRegions.next();
        RegionStatus subRegionStatus = subRegion instanceof PartitionedRegion
            ? new PartitionedRegionStatus((PartitionedRegion) subRegion)
            : new RegionStatus(subRegion);
        putRegionStatus(subRegion.getFullPath(), subRegionStatus);
      }
    }
  }
}
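The instanceof check above is what keeps Geode's internal HA queue backing regions out of the user-facing status map. A minimal standalone sketch of the same traversal pattern (dumpRegionPaths is a hypothetical helper; it assumes a running Cache):

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.Region;
import org.apache.geode.internal.cache.HARegion;

// Walk every root region and its subregions, skipping internal HA regions.
static void dumpRegionPaths(Cache cache) {
  for (Region<?, ?> root : cache.rootRegions()) {
    if (root instanceof HARegion) {
      continue; // internal HA queue backing region, not user data
    }
    System.out.println(root.getFullPath());
    for (Region<?, ?> sub : root.subregions(true)) {
      System.out.println(sub.getFullPath());
    }
  }
}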
use of org.apache.geode.internal.cache.HARegion in project geode by apache.
the class HARegionQueue method putGIIDataInRegion.
/**
 * Repopulates the HARegion after the GII is over, so as to reset the counters and populate the
 * DACE objects for the thread identifiers. This method should be invoked as the last step of
 * construction: when creating a blocking queue, the derived class constructor should call it
 * only after the HARegionQueue constructor has completed. Otherwise, the ReentrantLock will be
 * null.
 */
void putGIIDataInRegion() throws CacheException, InterruptedException {
  Set entrySet = this.region.entrySet(false);
  // if the region already holds GII'd data, the queue's data structures have to be populated
  if (!entrySet.isEmpty()) {
    this.puttingGIIDataInQueue = true;
    final boolean isDebugEnabled = logger.isDebugEnabled();
    try {
      Region.Entry entry = null;
      Map orderedMap = new TreeMap();
      Iterator iterator = entrySet.iterator();
      Object key = null;
      while (iterator.hasNext()) {
        entry = (Region.Entry) iterator.next();
        key = entry.getKey();
        if (isDebugEnabled) {
          logger.debug("processing queue key {} and value {}", key, entry.getValue());
        }
        if (key instanceof Long) {
          if (!(entry.getValue() instanceof ClientMarkerMessageImpl)) {
            orderedMap.put(key, entry.getValue());
          }
        }
        this.region.localDestroy(key);
      }
      long max = 0;
      long counterInRegion = 0;
      entrySet = orderedMap.entrySet();
      if (!entrySet.isEmpty()) {
        Map.Entry mapEntry = null;
        iterator = entrySet.iterator();
        while (iterator.hasNext()) {
          mapEntry = (Map.Entry) iterator.next();
          Conflatable val = (Conflatable) mapEntry.getValue();
          if (val != null && val.getEventId() != null) {
            // use longValue(), not intValue(), so the counter is not truncated
            counterInRegion = ((Long) mapEntry.getKey()).longValue();
            // TODO: remove this assertion
            Assert.assertTrue(counterInRegion > max);
            max = counterInRegion;
            this.put(val);
          } else if (isDebugEnabled) {
            logger.debug(
                "bug 44959 encountered: HARegion.putGIIDataInRegion found null eventId in {}",
                val);
          }
        }
      }
      this.tailKey.set(max);
    } finally {
      this.puttingGIIDataInQueue = false;
      if (isDebugEnabled) {
        logger.debug("{} done putting GII data into queue", this);
      }
    }
  }
  // TODO: Asif: Avoid invocation of this method
  startHAServices(this.region.getCache());
}
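The TreeMap above is what guarantees the GII'd events are re-queued in ascending counter order even though the region's entry set has no defined iteration order. A small self-contained illustration of that property (keys and values are placeholders):

import java.util.Map;
import java.util.TreeMap;

// TreeMap iterates keys in natural (ascending) order, regardless of insertion order.
Map<Long, String> ordered = new TreeMap<>();
ordered.put(3L, "event-3");
ordered.put(1L, "event-1");
ordered.put(2L, "event-2");
ordered.forEach((counter, event) -> System.out.println(counter + " -> " + event));
// prints: 1 -> event-1, 2 -> event-2, 3 -> event-3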
use of org.apache.geode.internal.cache.HARegion in project geode by apache.
the class HARegionJUnitTest method testLocalDestroy.
/**
 * tests that no exception is thrown while doing a localDestroy on an HARegion
 */
@Test
public void testLocalDestroy() throws Exception {
  Region region = createHARegion();
  region.put("key1", "value1");
  region.localDestroy("key1");
  assertNull(region.get("key1"));
}
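For context, localDestroy differs from destroy in scope; a minimal sketch of the contrast (region setup omitted, keys hypothetical):

// destroy removes the entry from the whole distributed region.
region.destroy("key1");

// localDestroy removes the entry from this member's cache only;
// other members of the distributed system keep their copy.
region.localDestroy("key2");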
use of org.apache.geode.internal.cache.HARegion in project geode by apache.
the class HARegionJUnitTest method createHARegion.
/**
* create the HARegion
*/
private Region createHARegion() throws TimeoutException, CacheWriterException, GatewayException,
    CacheExistsException, RegionExistsException, IOException, ClassNotFoundException {
  AttributesFactory factory = new AttributesFactory();
  factory.setDataPolicy(DataPolicy.REPLICATE);
  factory.setScope(Scope.DISTRIBUTED_ACK);
  ExpirationAttributes ea = new ExpirationAttributes(2000, ExpirationAction.LOCAL_INVALIDATE);
  factory.setStatisticsEnabled(true);
  factory.setCacheListener(new CacheListenerAdapter() {
    @Override
    public void afterInvalidate(EntryEvent event) {
    }
  });
  RegionAttributes ra = factory.create();
  Region region =
      HARegion.getInstance("HARegionJUnitTest_region", (GemFireCacheImpl) cache, null, ra);
  region.getAttributesMutator().setEntryTimeToLive(ea);
  return region;
}
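Entry expiration in Geode only runs when statistics are enabled, which is why the factory calls setStatisticsEnabled(true) before the TTL is applied via the mutator. A minimal sketch of the same expiration setup on an ordinary region, with the TTL set directly on the factory instead (attribute values copied from the test above; the rest is illustrative):

import org.apache.geode.cache.AttributesFactory;
import org.apache.geode.cache.DataPolicy;
import org.apache.geode.cache.ExpirationAction;
import org.apache.geode.cache.ExpirationAttributes;
import org.apache.geode.cache.RegionAttributes;
import org.apache.geode.cache.Scope;

AttributesFactory factory = new AttributesFactory();
factory.setScope(Scope.DISTRIBUTED_ACK);
factory.setDataPolicy(DataPolicy.REPLICATE);
factory.setStatisticsEnabled(true); // required, or the TTL below is never enforced
factory.setEntryTimeToLive(new ExpirationAttributes(2000, ExpirationAction.LOCAL_INVALIDATE));
RegionAttributes attrs = factory.create();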
use of org.apache.geode.internal.cache.HARegion in project geode by apache.
the class HARegionQueueDUnitTest method verifyMapsAndData.
/**
* verifies the data has been populated correctly after GII
*/
private static void verifyMapsAndData() {
  try {
    HARegion r1 = (HARegion) hrq.getRegion();
    // region should not be null
    assertNotNull("Did not expect the HARegion to be null but it is", r1);
    // it should have ten non-null entries
    for (int i = 1; i < 11; i++) {
      assertNotNull("Did not expect the entry to be null but it is", r1.get(new Long(i)));
    }
    // HARegionQueue should not be null
    assertNotNull("Did not expect the HARegionQueue to be null but it is", hrq);
    Map conflationMap = hrq.getConflationMapForTesting();
    // conflationMap size should be greater than 0
    assertTrue("Did not expect the conflationMap size to be 0 but it is",
        conflationMap.size() > 0);
    Map internalMap = (Map) conflationMap.get("HARegionQueueDUnitTest_region");
    // internal map should not be null; it should be present
    assertNotNull("Did not expect the internalMap to be null but it is", internalMap);
    // get and verify the entries in the conflation map
    for (int i = 1; i < 11; i++) {
      assertTrue("Did not expect the entry not to be equal but it is",
          internalMap.get("key" + i).equals(new Long(i)));
    }
    Map eventMap = hrq.getEventsMapForTesting();
    // DACE should not be null
    assertNotNull("Did not expect the result (DACE object) to be null but it is",
        eventMap.get(new ThreadIdentifier(new byte[] { 1 }, 1)));
    Set counterSet = hrq.getCurrentCounterSet(new EventID(new byte[] { 1 }, 1, 1));
    assertTrue("expected the counter set size to be 10 but it is not so",
        counterSet.size() == 10);
    long i = 1;
    Iterator iterator = counterSet.iterator();
    // the set is a LinkedHashSet, so iteration order matches insertion order
    while (iterator.hasNext()) {
      assertTrue(((Long) iterator.next()).longValue() == i);
      i++;
    }
    // the last dispatched sequence id should be -1 since no dispatch has been made
    assertTrue(hrq.getLastDispatchedSequenceId(new EventID(new byte[] { 1 }, 1, 1)) == -1);
    // sleep for 7.5 seconds; everything should expire, leaving the maps null or empty
    Thread.sleep(7500);
    for (int j = 1; j < 11; j++) {
      assertNull("expected the entry to be null since expiry time exceeded but it is not so",
          r1.get(new Long(j)));
    }
    internalMap = (Map) hrq.getConflationMapForTesting().get("HARegionQueueDUnitTest_region");
    assertNotNull("Did not expect the internalMap to be null but it is", internalMap);
    assertTrue("internalMap (conflation) should have been empty since expiry of all entries"
        + " has been exceeded but it is not so", internalMap.isEmpty());
    assertTrue("eventMap should have been empty since expiry of all entries has been exceeded"
        + " but it is not so", eventMap.isEmpty());
    assertTrue("counter set should have been empty since expiry of all entries has been"
        + " exceeded but it is not so", counterSet.isEmpty());
  } catch (Exception ex) {
    fail("verifyMapsAndData failed", ex);
  }
}
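The fixed Thread.sleep makes the expiry check timing-sensitive; a common alternative is to poll until expiration has actually happened. A sketch assuming the Awaitility library is on the test classpath (an assumption; the snippet above does not use it):

import static org.awaitility.Awaitility.await;
import java.util.concurrent.TimeUnit;

// Poll instead of sleeping a fixed interval; passes as soon as the entry has expired.
await().atMost(30, TimeUnit.SECONDS).until(() -> r1.get(new Long(1)) == null);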