Use of org.apache.geode.internal.offheap.MemoryAllocatorImpl in project geode by apache.
Class MultiVMRegionTestCase, method testRemoteCacheWriter.
/**
* Tests that a remote {@link CacheWriter} is invoked and that <code>CacheWriter</code> arguments
* and {@link CacheWriterException}s are propagated appropriately.
*/
@Test
public void testRemoteCacheWriter() throws Exception {
assertTrue(getRegionAttributes().getScope().isDistributed());
final String name = this.getUniqueName();
final Object key = "KEY";
final Object oldValue = "OLD_VALUE";
final Object newValue = "NEW_VALUE";
final Object arg = "ARG";
final Object exception = "EXCEPTION";
final Object key2 = "KEY2";
final Object value2 = "VALUE2";
SerializableRunnable create = new CacheSerializableRunnable("Create Region") {
@Override
public void run2() throws CacheException {
Region region = createRegion(name);
// Put key2 in the region before any callbacks are
// registered, so it can be destroyed later
region.put(key2, value2);
assertEquals(1, region.size());
if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
LocalRegion reRegion = (LocalRegion) region;
RegionEntry re = reRegion.getRegionEntry(key2);
StoredObject so = (StoredObject) re._getValue();
assertEquals(1, so.getRefCount());
assertEquals(1, ma.getStats().getObjects());
}
}
};
Host host = Host.getHost(0);
VM vm0 = host.getVM(0);
VM vm1 = host.getVM(1);
vm0.invoke(create);
vm1.invoke(create);
//////// Create
vm1.invoke(new CacheSerializableRunnable("Set Writer") {
@Override
public void run2() throws CacheException {
final Region region = getRootRegion().getSubregion(name);
writer = new TestCacheWriter() {
@Override
public void beforeCreate2(EntryEvent event) throws CacheWriterException {
if (exception.equals(event.getCallbackArgument())) {
String s = "Test Exception";
throw new CacheWriterException(s);
}
assertEquals(region, event.getRegion());
assertTrue(event.getOperation().isCreate());
assertTrue(event.getOperation().isDistributed());
assertFalse(event.getOperation().isExpiration());
assertTrue(event.isOriginRemote());
assertEquals(key, event.getKey());
assertEquals(null, event.getOldValue());
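// the create stores oldValue ("OLD_VALUE") as the entry's value, so it arrives as the event's new value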
assertEquals(oldValue, event.getNewValue());
assertFalse(event.getOperation().isLoad());
assertFalse(event.getOperation().isLocalLoad());
assertFalse(event.getOperation().isNetLoad());
assertFalse(event.getOperation().isNetSearch());
}
};
region.getAttributesMutator().setCacheWriter(writer);
flushIfNecessary(region);
}
});
vm0.invoke(new CacheSerializableRunnable("Create with Exception") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(name);
try {
region.put(key, oldValue, exception);
fail("Should have thrown a CacheWriterException");
} catch (CacheWriterException ex) {
assertNull(region.getEntry(key));
assertEquals(1, region.size());
if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
assertEquals(1, ma.getStats().getObjects());
}
}
}
});
vm1.invoke(new SerializableRunnable("Verify callback") {
@Override
public void run() {
assertTrue(writer.wasInvoked());
}
});
vm0.invoke(new CacheSerializableRunnable("Create with Argument") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(name);
region.put(key, oldValue, arg);
assertEquals(2, region.size());
if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
assertEquals(2, ma.getStats().getObjects());
LocalRegion reRegion = (LocalRegion) region;
StoredObject so = (StoredObject) reRegion.getRegionEntry(key)._getValue();
assertEquals(1, so.getRefCount());
}
}
});
vm1.invoke(new SerializableRunnable("Verify callback") {
@Override
public void run() {
assertTrue(writer.wasInvoked());
}
});
//////// Update
vm1.invoke(new CacheSerializableRunnable("Set Writer") {
@Override
public void run2() throws CacheException {
final Region region = getRootRegion().getSubregion(name);
writer = new TestCacheWriter() {
@Override
public void beforeUpdate2(EntryEvent event) throws CacheWriterException {
Object argument = event.getCallbackArgument();
if (exception.equals(argument)) {
String s = "Test Exception";
throw new CacheWriterException(s);
}
assertEquals(arg, argument);
assertEquals(region, event.getRegion());
assertTrue(event.getOperation().isUpdate());
assertTrue(event.getOperation().isDistributed());
assertFalse(event.getOperation().isExpiration());
assertTrue(event.isOriginRemote());
assertEquals(key, event.getKey());
assertEquals(oldValue, event.getOldValue());
assertEquals(newValue, event.getNewValue());
assertFalse(event.getOperation().isLoad());
assertFalse(event.getOperation().isLocalLoad());
assertFalse(event.getOperation().isNetLoad());
assertFalse(event.getOperation().isNetSearch());
}
};
region.getAttributesMutator().setCacheWriter(writer);
}
});
vm0.invoke(new CacheSerializableRunnable("Update with Exception") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(name);
try {
region.put(key, newValue, exception);
fail("Should have thrown a CacheWriterException");
} catch (CacheWriterException ex) {
Region.Entry entry = region.getEntry(key);
assertEquals(oldValue, entry.getValue());
assertEquals(2, region.size());
if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
assertEquals(2, ma.getStats().getObjects());
LocalRegion reRegion = (LocalRegion) region;
StoredObject so = (StoredObject) reRegion.getRegionEntry(key)._getValue();
assertEquals(1, so.getRefCount());
}
}
}
});
vm1.invoke(new SerializableRunnable("Verify callback") {
@Override
public void run() {
assertTrue(writer.wasInvoked());
}
});
vm0.invoke(new CacheSerializableRunnable("Update with Argument") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(name);
region.put(key, newValue, arg);
assertEquals(2, region.size());
if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
assertEquals(2, ma.getStats().getObjects());
}
}
});
vm1.invoke(new SerializableRunnable("Verify callback") {
@Override
public void run() {
assertTrue(writer.wasInvoked());
}
});
//////// Destroy
vm1.invoke(new CacheSerializableRunnable("Set Writer") {
@Override
public void run2() throws CacheException {
final Region region = getRootRegion().getSubregion(name);
writer = new TestCacheWriter() {
@Override
public void beforeDestroy2(EntryEvent event) throws CacheWriterException {
Object argument = event.getCallbackArgument();
if (exception.equals(argument)) {
String s = "Test Exception";
throw new CacheWriterException(s);
}
assertEquals(arg, argument);
assertEquals(region, event.getRegion());
assertTrue(event.getOperation().isDestroy());
assertTrue(event.getOperation().isDistributed());
assertFalse(event.getOperation().isExpiration());
assertTrue(event.isOriginRemote());
assertEquals(key, event.getKey());
assertEquals(newValue, event.getOldValue());
assertNull(event.getNewValue());
assertFalse(event.getOperation().isLoad());
assertFalse(event.getOperation().isLocalLoad());
assertFalse(event.getOperation().isNetLoad());
assertFalse(event.getOperation().isNetSearch());
}
};
region.getAttributesMutator().setCacheWriter(writer);
}
});
vm0.invoke(new CacheSerializableRunnable("Destroy with Exception") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(name);
try {
region.destroy(key, exception);
fail("Should have thrown a CacheWriterException");
} catch (CacheWriterException ex) {
assertNotNull(region.getEntry(key));
assertEquals(2, region.size());
if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
assertEquals(2, ma.getStats().getObjects());
}
}
}
});
vm1.invoke(new SerializableRunnable("Verify callback") {
@Override
public void run() {
assertTrue(writer.wasInvoked());
}
});
vm0.invoke(new CacheSerializableRunnable("Destroy with Argument") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(name);
region.destroy(key, arg);
assertEquals(1, region.size());
if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
assertEquals(1, ma.getStats().getObjects());
}
}
});
vm1.invoke(new SerializableRunnable("Verify callback") {
@Override
public void run() {
assertTrue(writer.wasInvoked());
}
});
//////// Region Destroy
vm1.invoke(new CacheSerializableRunnable("Set Writer") {
@Override
public void run2() throws CacheException {
final Region region = getRootRegion().getSubregion(name);
writer = new TestCacheWriter() {
@Override
public void beforeRegionDestroy2(RegionEvent event) throws CacheWriterException {
Object argument = event.getCallbackArgument();
if (exception.equals(argument)) {
String s = "Test Exception";
throw new CacheWriterException(s);
}
assertEquals(arg, argument);
assertEquals(region, event.getRegion());
assertTrue(event.getOperation().isRegionDestroy());
assertTrue(event.getOperation().isDistributed());
assertFalse(event.getOperation().isExpiration());
assertTrue(event.isOriginRemote());
}
};
region.getAttributesMutator().setCacheWriter(writer);
}
});
vm0.invoke(new CacheSerializableRunnable("Destroy with Exception") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(name);
try {
region.destroyRegion(exception);
fail("Should have thrown a CacheWriterException");
} catch (CacheWriterException ex) {
if (region.isDestroyed()) {
fail("should not have an exception if region is destroyed", ex);
}
assertEquals(1, region.size());
if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
assertEquals(1, ma.getStats().getObjects());
}
}
}
});
vm1.invoke(new SerializableRunnable("Verify callback") {
@Override
public void run() {
assertTrue(writer.wasInvoked());
}
});
vm0.invoke(new CacheSerializableRunnable("Destroy with Argument") {
@Override
public void run2() throws CacheException {
Region region = getRootRegion().getSubregion(name);
assertEquals(1, region.size());
if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
assertEquals(1, ma.getStats().getObjects());
}
region.destroyRegion(arg);
if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
final MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
WaitCriterion waitForStatChange = new WaitCriterion() {
@Override
public boolean done() {
return ma.getStats().getObjects() == 0;
}
@Override
public String description() {
return "never saw off-heap object count go to zero. Last value was " + ma.getStats().getObjects();
}
};
Wait.waitForCriterion(waitForStatChange, 3000, 10, true);
}
}
});
vm1.invoke(new SerializableRunnable("Verify callback") {
@Override
public void run() {
assertTrue(writer.wasInvoked());
}
});
}
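The same off-heap verification idiom recurs throughout the test above: look up the cache's off-heap store, cast it to MemoryAllocatorImpl, and compare the allocator's object stat against the expected count. As an illustration only, a hypothetical helper along these lines could consolidate those inline checks; the method name and its placement in the test class are assumptions, not part of the Geode sources.
// Hypothetical helper (an assumption): asserts the off-heap object count for
// non-partitioned off-heap regions, mirroring the inline checks in the test above.
private void assertOffHeapObjectCount(Region region, int expectedObjects) {
  if (region.getAttributes().getOffHeap() && !(region instanceof PartitionedRegion)) {
    GemFireCacheImpl gfc = (GemFireCacheImpl) getCache();
    MemoryAllocatorImpl ma = (MemoryAllocatorImpl) gfc.getOffHeapStore();
    assertEquals(expectedObjects, ma.getStats().getObjects());
  }
}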
Use of org.apache.geode.internal.offheap.MemoryAllocatorImpl in project geode by apache.
Class ClientServerGetAllDUnitTest, method testLargeOffHeapGetAllFromServer.
@Test
public void testLargeOffHeapGetAllFromServer() throws Throwable {
final Host host = Host.getHost(0);
final VM server = host.getVM(0);
final VM client = host.getVM(1);
final String regionName = getUniqueName();
final int serverPort = AvailablePortHelper.getRandomAvailableTCPPort();
final String serverHost = NetworkUtils.getServerHostName(server.getHost());
createBridgeServer(server, regionName, serverPort, false, false, true);
createBridgeClient(client, regionName, serverHost, new int[] { serverPort }, true);
final int VALUE_SIZE = 1024 * 2;
final int VALUE_COUNT = 100;
client.invoke(new CacheSerializableRunnable("put entries on server") {
@Override
public void run2() throws CacheException {
final byte[] VALUE = new byte[VALUE_SIZE];
for (int i = 0; i < VALUE_SIZE; i++) {
VALUE[i] = (byte) i;
}
Region region = getRootRegion(regionName);
for (int i = 0; i < VALUE_COUNT; i++) {
region.put("k" + i, new UnitTestValueHolder(VALUE));
}
}
});
CacheSerializableRunnable clientGetAll = new CacheSerializableRunnable("Get all entries from server") {
@Override
public void run2() throws CacheException {
// Build collection of keys
Collection keys = new ArrayList();
for (int i = 0; i < VALUE_COUNT; i++) {
keys.add("k" + i);
}
// Invoke getAll
Region region = getRootRegion(regionName);
final int GET_COUNT = 10;
long start = System.currentTimeMillis();
Map result = null;
for (int i = 0; i < GET_COUNT; i++) {
// allow gc to get rid of the previous map before deserializing the next one
result = null;
result = region.getAll(keys);
}
long end = System.currentTimeMillis();
long totalBytesRead = ((long) GET_COUNT * VALUE_COUNT * VALUE_SIZE);
long elapsedMillis = (end - start);
System.out.println("PERF: read " + totalBytesRead + " bytes in " + elapsedMillis + " millis. bps=" + (((double) totalBytesRead / elapsedMillis) * 1000));
// Verify result size is correct
assertEquals(VALUE_COUNT, result.size());
final byte[] EXPECTED = new byte[VALUE_SIZE];
for (int i = 0; i < VALUE_SIZE; i++) {
EXPECTED[i] = (byte) i;
}
// each value should be a UnitTestValueHolder wrapping a byte array identical to EXPECTED
for (Iterator i = keys.iterator(); i.hasNext(); ) {
String key = (String) i.next();
assertTrue(result.containsKey(key));
Object value = result.get(key);
if (value instanceof UnitTestValueHolder) {
Object v = ((UnitTestValueHolder) value).getValue();
if (v instanceof byte[]) {
byte[] bytes = (byte[]) v;
if (bytes.length != VALUE_SIZE) {
fail("expected value for key " + key + " to be an array of size " + (VALUE_SIZE) + " but it was: " + bytes.length);
}
if (!Arrays.equals(EXPECTED, bytes)) {
fail("expected bytes=" + Arrays.toString(bytes) + " to be expected=" + Arrays.toString(EXPECTED));
}
} else {
fail("expected v for key " + key + " to be a byte array but it was: " + v);
}
} else {
fail("expected value for key " + key + " to be a UnitTestValueHolder but it was: " + value);
}
}
}
};
// Run getAll
{
final int THREAD_COUNT = 4;
AsyncInvocation[] ais = new AsyncInvocation[THREAD_COUNT];
for (int i = 0; i < THREAD_COUNT; i++) {
ais[i] = client.invokeAsync(clientGetAll);
}
for (int i = 0; i < THREAD_COUNT; i++) {
ais[i].getResult();
}
}
server.invoke(new CacheSerializableRunnable("Dump OffHeap Stats") {
@Override
public void run2() throws CacheException {
MemoryAllocatorImpl ma = MemoryAllocatorImpl.getAllocator();
System.out.println("STATS: objects=" + ma.getStats().getObjects() + " usedMemory=" + ma.getStats().getUsedMemory() + " reads=" + ma.getStats().getReads());
}
});
checkServerForOrphans(server, regionName);
stopBridgeServer(server);
}
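The PERF line printed by the getAll loop folds the throughput arithmetic into a single statement. A minimal sketch of the same calculation, pulled into a hypothetical helper (the name is an assumption), makes the bytes-per-second conversion explicit.
// Hypothetical helper: reports getAll throughput the same way the test above does.
// bytesRead / elapsedMillis gives bytes per millisecond; multiplying by 1000 yields bytes per second.
private static void logThroughput(long bytesRead, long elapsedMillis) {
  // guard against a sub-millisecond run producing a division by zero
  long millis = Math.max(elapsedMillis, 1);
  double bytesPerSecond = ((double) bytesRead / millis) * 1000;
  System.out.println("PERF: read " + bytesRead + " bytes in " + millis + " millis. bps=" + bytesPerSecond);
}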
Use of org.apache.geode.internal.offheap.MemoryAllocatorImpl in project geode by apache.
Class OldValueImporterTestBase, method testValueSerialization.
@Test
public void testValueSerialization() throws Exception {
byte[] bytes = new byte[1024];
HeapDataOutputStream hdos = new HeapDataOutputStream(bytes);
OldValueImporter imsg = createImporter();
// null byte array value
{
OldValueImporter omsg = createImporter();
omsg.importOldBytes(null, false);
toData(omsg, hdos);
fromData(imsg, bytes);
assertEquals(null, getOldValueFromImporter(imsg));
}
// null object value
{
OldValueImporter omsg = createImporter();
omsg.importOldObject(null, true);
toData(omsg, hdos);
fromData(imsg, bytes);
assertEquals(null, getOldValueFromImporter(imsg));
}
// simple byte array
{
byte[] baValue = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8, 9 };
OldValueImporter omsg = createImporter();
omsg.importOldBytes(baValue, false);
hdos = new HeapDataOutputStream(bytes);
toData(omsg, hdos);
fromData(imsg, bytes);
assertArrayEquals(baValue, (byte[]) getOldValueFromImporter(imsg));
}
// String in serialized form
{
String stringValue = "1,2,3,4,5,6,7,8,9";
byte[] stringValueBlob = EntryEventImpl.serialize(stringValue);
OldValueImporter omsg = createImporter();
omsg.importOldBytes(stringValueBlob, true);
hdos = new HeapDataOutputStream(bytes);
toData(omsg, hdos);
fromData(imsg, bytes);
assertArrayEquals(stringValueBlob, ((VMCachedDeserializable) getOldValueFromImporter(imsg)).getSerializedValue());
}
// String in object form
{
String stringValue = "1,2,3,4,5,6,7,8,9";
byte[] stringValueBlob = EntryEventImpl.serialize(stringValue);
OldValueImporter omsg = createImporter();
omsg.importOldObject(stringValue, true);
hdos = new HeapDataOutputStream(bytes);
toData(omsg, hdos);
fromData(imsg, bytes);
assertArrayEquals(stringValueBlob, ((VMCachedDeserializable) getOldValueFromImporter(imsg)).getSerializedValue());
}
// off-heap DataAsAddress byte array
{
MemoryAllocatorImpl sma = MemoryAllocatorImpl.createForUnitTest(new NullOutOfOffHeapMemoryListener(), new NullOffHeapMemoryStats(), new SlabImpl[] { new SlabImpl(1024 * 1024) });
try {
byte[] baValue = new byte[] { 1, 2 };
TinyStoredObject baValueSO = (TinyStoredObject) sma.allocateAndInitialize(baValue, false, false);
OldValueImporter omsg = createImporter();
omsg.importOldObject(baValueSO, false);
hdos = new HeapDataOutputStream(bytes);
toData(omsg, hdos);
fromData(imsg, bytes);
assertArrayEquals(baValue, (byte[]) getOldValueFromImporter(imsg));
} finally {
MemoryAllocatorImpl.freeOffHeapMemory();
}
}
// off-heap Chunk byte array
{
MemoryAllocatorImpl sma = MemoryAllocatorImpl.createForUnitTest(new NullOutOfOffHeapMemoryListener(), new NullOffHeapMemoryStats(), new SlabImpl[] { new SlabImpl(1024 * 1024) });
try {
byte[] baValue = new byte[] { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17 };
OffHeapStoredObject baValueSO = (OffHeapStoredObject) sma.allocateAndInitialize(baValue, false, false);
OldValueImporter omsg = createImporter();
omsg.importOldObject(baValueSO, false);
hdos = new HeapDataOutputStream(bytes);
toData(omsg, hdos);
fromData(imsg, bytes);
assertArrayEquals(baValue, (byte[]) getOldValueFromImporter(imsg));
} finally {
MemoryAllocatorImpl.freeOffHeapMemory();
}
}
// off-heap DataAsAddress String
{
MemoryAllocatorImpl sma = MemoryAllocatorImpl.createForUnitTest(new NullOutOfOffHeapMemoryListener(), new NullOffHeapMemoryStats(), new SlabImpl[] { new SlabImpl(1024 * 1024) });
try {
String baValue = "12";
byte[] baValueBlob = BlobHelper.serializeToBlob(baValue);
TinyStoredObject baValueSO = (TinyStoredObject) sma.allocateAndInitialize(baValueBlob, true, false);
OldValueImporter omsg = createImporter();
omsg.importOldObject(baValueSO, true);
hdos = new HeapDataOutputStream(bytes);
toData(omsg, hdos);
fromData(imsg, bytes);
assertArrayEquals(baValueBlob, ((VMCachedDeserializable) getOldValueFromImporter(imsg)).getSerializedValue());
} finally {
MemoryAllocatorImpl.freeOffHeapMemory();
}
}
// off-heap Chunk String
{
MemoryAllocatorImpl sma = MemoryAllocatorImpl.createForUnitTest(new NullOutOfOffHeapMemoryListener(), new NullOffHeapMemoryStats(), new SlabImpl[] { new SlabImpl(1024 * 1024) });
try {
String baValue = "12345678";
byte[] baValueBlob = BlobHelper.serializeToBlob(baValue);
OffHeapStoredObject baValueSO = (OffHeapStoredObject) sma.allocateAndInitialize(baValueBlob, true, false);
OldValueImporter omsg = createImporter();
omsg.importOldObject(baValueSO, true);
hdos = new HeapDataOutputStream(bytes);
toData(omsg, hdos);
fromData(imsg, bytes);
assertArrayEquals(baValueBlob, ((VMCachedDeserializable) getOldValueFromImporter(imsg)).getSerializedValue());
} finally {
MemoryAllocatorImpl.freeOffHeapMemory();
}
}
}
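Each off-heap case above repeats the same lifecycle: create a unit-test allocator with MemoryAllocatorImpl.createForUnitTest, allocate against it, and release everything with MemoryAllocatorImpl.freeOffHeapMemory in a finally block. A minimal sketch of a reusable wrapper for that lifecycle is shown below; the helper and its AllocatorBody interface are assumptions for illustration, not part of OldValueImporterTestBase.
// Hypothetical wrapper (an assumption): runs a block against a fresh unit-test
// allocator and always frees the off-heap memory afterwards, mirroring the
// try/finally blocks repeated in the test above.
interface AllocatorBody {
  void run(MemoryAllocatorImpl sma) throws Exception;
}

private static void withUnitTestAllocator(AllocatorBody body) throws Exception {
  MemoryAllocatorImpl sma = MemoryAllocatorImpl.createForUnitTest(new NullOutOfOffHeapMemoryListener(),
      new NullOffHeapMemoryStats(), new SlabImpl[] { new SlabImpl(1024 * 1024) });
  try {
    body.run(sma);
  } finally {
    MemoryAllocatorImpl.freeOffHeapMemory();
  }
}
A case from the test could then read withUnitTestAllocator(sma -> { ... sma.allocateAndInitialize(baValue, false, false); ... }), keeping the allocate/verify logic while the cleanup stays in one place.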
Use of org.apache.geode.internal.offheap.MemoryAllocatorImpl in project geode by apache.
Class OffHeapTestUtil, method checkOrphans.
public static void checkOrphans() {
MemoryAllocatorImpl allocator = null;
try {
allocator = MemoryAllocatorImpl.getAllocator();
} catch (CacheClosedException ignore) {
// no off-heap memory so no orphans
return;
}
long end = System.currentTimeMillis() + 5000;
List<MemoryBlock> orphans = allocator.getOrphans();
// Wait for the orphans to go away
while (orphans != null && !orphans.isEmpty() && System.currentTimeMillis() < end) {
try {
Thread.sleep(100);
} catch (InterruptedException e) {
throw new RuntimeException(e);
}
orphans = allocator.getOrphans();
}
if (orphans != null && !orphans.isEmpty()) {
List<RefCountChangeInfo> info = ReferenceCountHelper.getRefCountInfo(orphans.get(0).getAddress());
System.out.println("FOUND ORPHAN!!");
System.out.println("Sample orphan: " + orphans.get(0));
System.out.println("Orphan info: " + info);
}
assertEquals(Collections.emptyList(), orphans);
}
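checkOrphans is meant to run inside the member that owns the off-heap memory, so a dunit test typically invokes it in the target VM after the region under test is torn down. The wiring below is a minimal sketch of that pattern (the serverVM variable and the runnable's name are assumptions; the actual checkServerForOrphans helper used in the getAll test above is not shown in these sources).
// Minimal sketch, assuming a dunit VM handle named serverVM:
// run the orphan check inside the server VM once the region has been destroyed.
serverVM.invoke(new SerializableRunnable("Check off-heap orphans") {
  @Override
  public void run() {
    OffHeapTestUtil.checkOrphans();
  }
});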