Use of org.apache.geode.pdx.PdxInstance in project geode by Apache.
From class VMCachedDeserializable, the method getDeserializedValue:
public Object getDeserializedValue(Region r, RegionEntry re) {
  Object v = this.value;
  if (v instanceof byte[]) {
    // org.apache.geode.internal.cache.GemFireCache.getInstance().getLogger().info("DEBUG
    // getDeserializedValue r=" + r + " re=" + re, new RuntimeException("STACK"));
    LRUEntry le = null;
    if (re != null) {
      assert r != null;
      if (re instanceof LRUEntry) {
        le = (LRUEntry) re;
      }
    }
    if (le != null) {
      if (r instanceof PartitionedRegion) {
        r = ((PartitionedRegion) r).getBucketRegion(re.getKey());
      }
      boolean callFinish = false;
      AbstractLRURegionMap lruMap = null;
      if (r != null) {
        // fix for bug 44795
        lruMap = (AbstractLRURegionMap) ((LocalRegion) r).getRegionMap();
      }
      boolean threadAlreadySynced = Thread.holdsLock(le);
      boolean isCacheListenerInvoked = re.isCacheListenerInvocationInProgress();
      synchronized (le) {
        v = this.value;
        if (!(v instanceof byte[]))
          return v;
        v = EntryEventImpl.deserialize((byte[]) v);
        if (threadAlreadySynced && !isCacheListenerInvoked) {
          // the thread that called us was already synced
          return v;
        }
        if (!(v instanceof PdxInstance)) {
          this.value = v;
          if (lruMap != null) {
            callFinish = lruMap.beginChangeValueForm(le, this, v);
          }
        }
      }
      if (callFinish && !isCacheListenerInvoked) {
        lruMap.finishChangeValueForm();
      }
    } else {
      // we sync on this so we will only do one deserialize
      synchronized (this) {
        v = this.value;
        if (!(v instanceof byte[]))
          return v;
        v = EntryEventImpl.deserialize((byte[]) v);
        if (!(v instanceof PdxInstance)) {
          this.value = v;
        }
        // ObjectSizer os = null;
        // if (r != null) {
        //   EvictionAttributes ea = r.getAttributes().getEvictionAttributes();
        //   if (ea != null) {
        //     os = ea.getObjectSizer();
        //   }
        //   int vSize = CachedDeserializableFactory.calcMemSize(v, os, false, false);
        //   if (vSize != -1) {
        //     int oldSize = this.valueSize;
        //     this.valueSize = vSize;
        //     if (r instanceof BucketRegion) {
        //       BucketRegion br = (BucketRegion) r;
        //       br.updateBucketMemoryStats(vSize - oldSize);
        //     }
        //     // @todo do we need to update some lru stats since the size changed?
        //   }
        //   // If vSize == -1 then leave valueSize as is which is the serialized size.
      }
    }
  }
  return v;
}
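This method shows why PdxInstance matters to value caching: the entry lazily deserializes its stored byte[] at most once under a lock, but deliberately does not replace the serialized form when the result is a PdxInstance, so later reads still honor read-serialized semantics. Below is a minimal, self-contained sketch of the same deserialize-once idiom in plain Java; the LazyValue class and its deserialize() helper are hypothetical stand-ins, not Geode API.

// A minimal sketch of the lazy "deserialize once" pattern used above.
final class LazyValue {
  private volatile Object value; // either byte[] (serialized) or the deserialized object

  LazyValue(byte[] serialized) {
    this.value = serialized;
  }

  Object getDeserialized() {
    Object v = value;
    if (!(v instanceof byte[])) {
      return v; // already deserialized by an earlier caller
    }
    synchronized (this) {
      v = value;
      if (v instanceof byte[]) {
        v = deserialize((byte[]) v); // done at most once
        value = v;                   // cache the deserialized form
      }
      return v;
    }
  }

  private static Object deserialize(byte[] bytes) {
    // stand-in for EntryEventImpl.deserialize(bytes)
    return new String(bytes);
  }
}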
Use of org.apache.geode.pdx.PdxInstance in project geode by Apache.
From class HashIndexQueryIntegrationTest, the method testPdxWithStringIndexKeyValues:
@Test
public void testPdxWithStringIndexKeyValues() throws Exception {
  createPartitionedRegion("test_region");
  int numEntries = 10;
  Index index = qs.createHashIndex("idHash", "p.id", "/test_region p");
  for (int i = 0; i < numEntries; i++) {
    PdxInstance record = CacheUtils.getCache().createPdxInstanceFactory("test_region")
        .writeString("id", "" + i).writeString("domain", "A").create();
    region.put("" + i, record);
  }
  SelectResults results = (SelectResults) qs
      .newQuery("SELECT DISTINCT tr.domain FROM /test_region tr WHERE tr.id='1'").execute();
  assertEquals(1, results.size());
  assertTrue(observer.indexUsed);
}
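The test builds its PDX records with the standard PdxInstanceFactory API rather than a domain class. Here is a short standalone sketch of that pattern; the type name "example.Record", the field values, and the mcast-port/locators settings are illustrative only.

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;
import org.apache.geode.pdx.PdxInstance;

public class PdxFactoryExample {
  public static void main(String[] args) {
    // stand-alone cache; properties chosen so the example does not try to cluster
    Cache cache = new CacheFactory()
        .set("mcast-port", "0")
        .set("locators", "")
        .create();
    // build a PdxInstance field by field, no domain class required
    PdxInstance record = cache.createPdxInstanceFactory("example.Record")
        .writeString("id", "1")
        .writeString("domain", "A")
        .create();
    System.out.println(record.hasField("id"));     // true
    System.out.println(record.getField("domain")); // A
    cache.close();
  }
}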
Use of org.apache.geode.pdx.PdxInstance in project geode by Apache.
From class SelectStarQueryDUnitTest, the method testSelectStarQueryForPdxObjects:
@Test
public void testSelectStarQueryForPdxObjects() throws Exception {
  final Host host = Host.getHost(0);
  final VM server1 = host.getVM(0);
  final VM client = host.getVM(3);
  // create server and region
  final int port1 = startReplicatedCacheServer(server1);
  server1.invoke(new SerializableCallable("Set observer") {
    @Override
    public Object call() throws Exception {
      oldObserver = QueryObserverHolder.setInstance(new QueryResultTrackingObserver());
      return null;
    }
  });
  // create client
  client.invoke(new SerializableCallable("Create client") {
    @Override
    public Object call() throws Exception {
      ClientCacheFactory cf = new ClientCacheFactory();
      cf.addPoolServer(getServerHostName(server1.getHost()), port1);
      ClientCache cache = getClientCache(cf);
      cache.createClientRegionFactory(ClientRegionShortcut.CACHING_PROXY).create(regName);
      return null;
    }
  });
  // populate with serialized PortfolioPdx objects
  client.invoke(new SerializableCallable("Put objects") {
    @Override
    public Object call() throws Exception {
      Region r1 = getRootRegion(regName);
      for (int i = 0; i < 20; i++) {
        r1.put("key-" + i, new PortfolioPdx(i));
      }
      return null;
    }
  });
  // query remotely from client
  client.invoke(new SerializableCallable("Query") {
    @Override
    public Object call() throws Exception {
      getLogWriter().info("Querying remotely from client");
      QueryService localQS = null;
      QueryService remoteQS = null;
      try {
        localQS = ((ClientCache) getCache()).getLocalQueryService();
        remoteQS = ((ClientCache) getCache()).getQueryService();
      } catch (Exception e) {
        fail("Exception getting query service ", e);
      }
      SelectResults res = null;
      SelectResults[][] sr = new SelectResults[1][2];
      for (int i = 0; i < queries.length; i++) {
        try {
          res = (SelectResults) localQS.newQuery(queries[i]).execute();
          sr[0][0] = res;
          res = (SelectResults) remoteQS.newQuery(queries[i]).execute();
          sr[0][1] = res;
          CacheUtils.compareResultsOfWithAndWithoutIndex(sr);
        } catch (Exception e) {
          fail("Error executing query: " + queries[i], e);
        }
        assertEquals(resultSize[i], res.size());
        if (i == 3) {
          int cnt = ((Integer) res.iterator().next());
          assertEquals(20, cnt);
        } else {
          for (Object rs : res) {
            if (rs instanceof StructImpl) {
              for (Object obj : ((StructImpl) rs).getFieldValues()) {
                if (obj instanceof PortfolioPdx || obj instanceof PositionPdx) {
                  // expected: deserialized domain object
                } else {
                  fail("Result objects for remote client query: " + queries[i]
                      + " should be instance of PortfolioPdx and not " + obj.getClass());
                }
              }
            } else if (rs instanceof PortfolioPdx) {
              // expected: deserialized domain object
            } else {
              fail("Result objects for remote client query: " + queries[i]
                  + " should be instance of PortfolioPdx and not " + rs.getClass());
            }
          }
        }
      }
      return null;
    }
  });
  // verify that objects iterated by the query were serialized
  server1.invoke(new SerializableCallable("Get observer") {
    @Override
    public Object call() throws Exception {
      QueryObserver observer = QueryObserverHolder.getInstance();
      assertTrue(QueryObserverHolder.hasObserver());
      assertTrue(observer instanceof QueryResultTrackingObserver);
      QueryResultTrackingObserver resultObserver = (QueryResultTrackingObserver) observer;
      assertTrue(resultObserver.isObjectSerialized());
      return null;
    }
  });
  // verify that objects returned by a local server query are not serialized
  server1.invoke(new SerializableCallable("Query") {
    @Override
    public Object call() throws Exception {
      QueryObserver observer = QueryObserverHolder.setInstance(new QueryResultTrackingObserver());
      QueryService qs = null;
      try {
        qs = getCache().getQueryService();
      } catch (Exception e) {
        fail("Exception getting query service ", e);
      }
      SelectResults res = null;
      for (int i = 0; i < queries.length; i++) {
        try {
          res = (SelectResults) qs.newQuery(queries[i]).execute();
        } catch (Exception e) {
          fail("Error executing query: " + queries[i], e);
        }
        assertEquals(resultSize[i], res.size());
        if (i == 3) {
          int cnt = ((Integer) res.iterator().next());
          assertEquals(20, cnt);
        } else {
          for (Object rs : res) {
            if (rs instanceof StructImpl) {
              for (Object obj : ((StructImpl) rs).getFieldValues()) {
                if (obj instanceof PortfolioPdx || obj instanceof PositionPdx) {
                  // expected: deserialized domain object
                } else {
                  fail("Result objects for local server query: " + queries[i]
                      + " should be instance of PortfolioPdx and not " + obj.getClass());
                }
              }
            } else if (rs instanceof PortfolioPdx) {
              // expected: deserialized domain object
            } else {
              fail("Result objects for local server query: " + queries[i]
                  + " should be instance of PortfolioPdx and not " + rs.getClass());
            }
          }
        }
      }
      observer = QueryObserverHolder.getInstance();
      assertTrue(QueryObserverHolder.hasObserver());
      assertTrue(observer instanceof QueryResultTrackingObserver);
      QueryResultTrackingObserver resultObserver = (QueryResultTrackingObserver) observer;
      assertFalse(resultObserver.isObjectSerialized());
      QueryObserverHolder.setInstance(oldObserver);
      return null;
    }
  });
  // verify that PdxInstances are returned by a local server query
  // when read-serialized is set to true
  server1.invoke(new SerializableCallable("Query") {
    @Override
    public Object call() throws Exception {
      GemFireCacheImpl cache = (GemFireCacheImpl) getCache();
      cache.setReadSerialized(true);
      QueryService qs = null;
      try {
        qs = getCache().getQueryService();
      } catch (Exception e) {
        fail("Exception getting query service ", e);
      }
      SelectResults res = null;
      for (int i = 0; i < queries.length; i++) {
        try {
          res = (SelectResults) qs.newQuery(queries[i]).execute();
        } catch (Exception e) {
          fail("Error executing query: " + queries[i], e);
        }
        assertEquals(resultSize[i], res.size());
        if (i == 3) {
          int cnt = ((Integer) res.iterator().next());
          assertEquals(20, cnt);
        } else {
          for (Object rs : res) {
            if (rs instanceof StructImpl) {
              for (Object obj : ((StructImpl) rs).getFieldValues()) {
                if (obj instanceof PdxInstance) {
                  // expected: PdxInstance under read-serialized
                } else {
                  fail("Result objects for local server query: " + queries[i]
                      + " should be instance of PdxInstance and not " + obj.getClass());
                }
              }
            } else if (rs instanceof PdxInstance) {
              // expected: PdxInstance under read-serialized
            } else {
              fail("Result objects for local server query: " + queries[i]
                  + " should be instance of PdxInstance and not " + rs.getClass());
            }
          }
        }
      }
      return null;
    }
  });
  closeCache(client);
  closeCache(server1);
}
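The last server-side block flips the cache into read-serialized mode mid-test through the internal GemFireCacheImpl.setReadSerialized(true). In application code the supported equivalent is to configure it up front on the factory; a brief sketch, with property values chosen purely for illustration:

import org.apache.geode.cache.Cache;
import org.apache.geode.cache.CacheFactory;

public class ReadSerializedConfig {
  public static void main(String[] args) {
    Cache cache = new CacheFactory()
        .set("mcast-port", "0")
        .set("locators", "")
        .setPdxReadSerialized(true) // queries/gets over serialized PDX data
        .create();                  // now yield PdxInstance, not domain objects
    // ... create regions and run queries; PDX results arrive as PdxInstance ...
    cache.close();
  }
}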
Use of org.apache.geode.pdx.PdxInstance in project geode by Apache.
From class PdxLuceneSerializer, the method toDocument:
@Override
public void toDocument(Object value, Document doc) {
  PdxInstance pdx = (PdxInstance) value;
  for (String field : indexedFields) {
    if (pdx.hasField(field)) {
      Object fieldValue = pdx.getField(field);
      if (fieldValue == null) {
        continue;
      }
      SerializerUtil.addField(doc, field, fieldValue);
    }
  }
  if (logger.isDebugEnabled()) {
    logger.debug("PdxLuceneSerializer.toDocument:" + doc);
  }
}
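toDocument simply walks the configured index fields, probing each with hasField/getField and skipping nulls. The same extraction loop works for any PdxInstance consumer; here is a small sketch that collects the fields into a Map instead of a Lucene Document (PdxFieldExtractor is a hypothetical helper):

import java.util.HashMap;
import java.util.Map;
import org.apache.geode.pdx.PdxInstance;

final class PdxFieldExtractor {
  // Collect the named fields of a PdxInstance, skipping absent and null
  // values, mirroring the hasField/null handling in toDocument above.
  static Map<String, Object> extractFields(PdxInstance pdx, String[] indexedFields) {
    Map<String, Object> out = new HashMap<>();
    for (String field : indexedFields) {
      if (pdx.hasField(field)) {
        Object fieldValue = pdx.getField(field);
        if (fieldValue != null) {
          out.put(field, fieldValue);
        }
      }
    }
    return out;
  }
}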
Use of org.apache.geode.pdx.PdxInstance in project geode by Apache.
From class PdxDeleteFieldDUnitTest, the method testPdxDeleteFieldVersioning:
@Test
public void testPdxDeleteFieldVersioning() throws Exception {
  final String DS_NAME = "PdxDeleteFieldDUnitTestDiskStore";
  final String DS_NAME2 = "PdxDeleteFieldDUnitTestDiskStore2";
  final Properties props = new Properties();
  final int[] locatorPorts = AvailablePortHelper.getRandomAvailableTCPPorts(2);
  props.setProperty(MCAST_PORT, "0");
  props.setProperty(LOCATORS,
      "localhost[" + locatorPorts[0] + "],localhost[" + locatorPorts[1] + "]");
  props.setProperty(ENABLE_CLUSTER_CONFIGURATION, "false");
  final File f = new File(DS_NAME);
  f.mkdir();
  final File f2 = new File(DS_NAME2);
  f2.mkdir();
  this.filesToBeDeleted.add(DS_NAME);
  this.filesToBeDeleted.add(DS_NAME2);
  Host host = Host.getHost(0);
  VM vm1 = host.getVM(0);
  VM vm2 = host.getVM(1);
  vm1.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      disconnectFromDS();
      props.setProperty(START_LOCATOR, "localhost[" + locatorPorts[0] + "]");
      final Cache cache =
          (new CacheFactory(props)).setPdxPersistent(true).setPdxDiskStore(DS_NAME).create();
      DiskStoreFactory dsf = cache.createDiskStoreFactory();
      dsf.setDiskDirs(new File[] { f });
      dsf.create(DS_NAME);
      RegionFactory<String, PdxValue> rf1 =
          cache.createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT);
      rf1.setDiskStoreName(DS_NAME);
      Region<String, PdxValue> region1 = rf1.create("region1");
      region1.put("key1", new PdxValue(1, 2L));
      return null;
    }
  });
  vm2.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      disconnectFromDS();
      props.setProperty(START_LOCATOR, "localhost[" + locatorPorts[1] + "]");
      final Cache cache = (new CacheFactory(props)).setPdxReadSerialized(true)
          .setPdxPersistent(true).setPdxDiskStore(DS_NAME2).create();
      DiskStoreFactory dsf = cache.createDiskStoreFactory();
      dsf.setDiskDirs(new File[] { f2 });
      dsf.create(DS_NAME2);
      RegionFactory rf1 = cache.createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT);
      rf1.setDiskStoreName(DS_NAME2);
      Region region1 = rf1.create("region1");
      Object v = region1.get("key1");
      assertNotNull(v);
      cache.close();
      return null;
    }
  });
  vm1.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      Cache cache = CacheFactory.getAnyInstance();
      if (cache != null && !cache.isClosed()) {
        cache.close();
      }
      return null;
    }
  });
  vm1.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      Collection<PdxType> types = DiskStoreImpl.pdxDeleteField(DS_NAME, new File[] { f },
          PdxValue.class.getName(), "fieldToDelete");
      assertEquals(1, types.size());
      PdxType pt = types.iterator().next();
      assertEquals(PdxValue.class.getName(), pt.getClassName());
      assertEquals(null, pt.getPdxField("fieldToDelete"));
      return null;
    }
  });
  vm1.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      props.setProperty(START_LOCATOR, "localhost[" + locatorPorts[0] + "]");
      final Cache cache =
          (new CacheFactory(props)).setPdxPersistent(true).setPdxDiskStore(DS_NAME).create();
      DiskStoreFactory dsf = cache.createDiskStoreFactory();
      dsf.setDiskDirs(new File[] { f });
      dsf.create(DS_NAME);
      RegionFactory<String, PdxValue> rf1 =
          cache.createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT);
      rf1.setDiskStoreName(DS_NAME);
      Region<String, PdxValue> region1 = rf1.create("region1");
      return null;
    }
  });
  vm2.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      props.setProperty(START_LOCATOR, "localhost[" + locatorPorts[1] + "]");
      final Cache cache = (new CacheFactory(props)).setPdxReadSerialized(true)
          .setPdxPersistent(true).setPdxDiskStore(DS_NAME2).create();
      DiskStoreFactory dsf = cache.createDiskStoreFactory();
      dsf.setDiskDirs(new File[] { f2 });
      dsf.create(DS_NAME2);
      RegionFactory rf1 = cache.createRegionFactory(RegionShortcut.REPLICATE_PERSISTENT);
      rf1.setDiskStoreName(DS_NAME2);
      Region region1 = rf1.create("region1");
      PdxInstance v = (PdxInstance) region1.get("key1");
      assertNotNull(v);
      assertEquals(1, v.getField("value"));
      assertEquals(null, v.getField("fieldToDelete"));
      cache.close();
      return null;
    }
  });
  vm1.invoke(new SerializableCallable() {
    public Object call() throws Exception {
      Cache cache = CacheFactory.getAnyInstance();
      if (cache != null && !cache.isClosed()) {
        cache.close();
      }
      return null;
    }
  });
}
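The field deletion itself happens offline: vm1 closes its cache first, then calls the internal DiskStoreImpl.pdxDeleteField hook, and afterwards old serialized values simply return null for the removed field. A condensed sketch of that offline step, using the same internal call the test exercises; the disk store name, directory, and class/field names here are assumptions, and since this is internal Geode API it should be treated as illustrative:

import java.io.File;
import java.util.Collection;
import org.apache.geode.internal.cache.DiskStoreImpl;
import org.apache.geode.pdx.internal.PdxType;

public class DeletePdxField {
  public static void main(String[] args) throws Exception {
    // The owning member must be offline (cache closed) before running this.
    Collection<PdxType> changed = DiskStoreImpl.pdxDeleteField(
        "myDiskStore",                              // disk store name (assumption)
        new File[] { new File("myDiskStoreDir") },  // its directories (assumption)
        "example.PdxValue",                         // class whose field is removed
        "fieldToDelete");                           // field to delete from the PdxType
    for (PdxType t : changed) {
      System.out.println("Updated type: " + t.getClassName());
    }
  }
}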