Use of org.apache.geode.internal.cache.KeyInfo in project geode by apache.
The class ParallelQueueRemovalMessageJUnitTest, method validateDestroyFromBucketQueueAndTempQueueInUninitializedBucketRegionQueue.
@Ignore
@Test
public void validateDestroyFromBucketQueueAndTempQueueInUninitializedBucketRegionQueue() {
  // Validate initial BucketRegionQueue state
  assertFalse(this.bucketRegionQueue.isInitialized());
  assertEquals(0, this.bucketRegionQueue.size());

  // Create a real ConcurrentParallelGatewaySenderQueue
  ParallelGatewaySenderEventProcessor processor = createConcurrentParallelGatewaySenderQueue();

  // Add an event to the BucketRegionQueue and verify BucketRegionQueue state
  GatewaySenderEventImpl event = this.bucketRegionQueueHelper.addEvent(KEY);
  assertEquals(1, this.bucketRegionQueue.size());

  // Add a mock GatewaySenderEventImpl to the temp queue
  BlockingQueue<GatewaySenderEventImpl> tempQueue = createTempQueueAndAddEvent(processor, event);
  assertEquals(1, tempQueue.size());

  // Create and process a ParallelQueueRemovalMessage (causes the value of the entry to be set
  // to DESTROYED)
  when(this.queueRegion.getKeyInfo(KEY, null, null)).thenReturn(new KeyInfo(KEY, null, null));
  createAndProcessParallelQueueRemovalMessage();

  // Validate temp queue is empty after processing ParallelQueueRemovalMessage
  assertEquals(0, tempQueue.size());

  // Clean up destroyed tokens
  this.bucketRegionQueueHelper.cleanUpDestroyedTokensAndMarkGIIComplete();

  // Validate BucketRegionQueue is empty after processing ParallelQueueRemovalMessage
  assertEquals(0, this.bucketRegionQueue.size());
}
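The one-line Mockito stub above is what lets the removal message resolve the entry against a mocked queue region. Below is a minimal sketch of that stubbing pattern in isolation, using only the calls visible in this test; the KeyInfoStubExample class name, the key literal, and the getKey() accessor are illustrative assumptions, not part of the snippet above.

import static org.mockito.Mockito.mock;
import static org.mockito.Mockito.when;

import org.apache.geode.internal.cache.KeyInfo;
import org.apache.geode.internal.cache.PartitionedRegion;

public class KeyInfoStubExample {
  public static void main(String[] args) {
    Object key = "event-key"; // illustrative key, stands in for KEY in the test
    PartitionedRegion queueRegion = mock(PartitionedRegion.class);
    // Mirror the test's stub: resolving the key returns a real KeyInfo rather
    // than a mock, constructed with everything but the key left null.
    when(queueRegion.getKeyInfo(key, null, null)).thenReturn(new KeyInfo(key, null, null));

    KeyInfo keyInfo = queueRegion.getKeyInfo(key, null, null);
    System.out.println(keyInfo.getKey()); // getKey() assumed here; prints event-key
  }
}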
Use of org.apache.geode.internal.cache.KeyInfo in project geode by apache.
The class GetMessage, method operateOnPartitionedRegion.
@Override
protected boolean operateOnPartitionedRegion(final DistributionManager dm, PartitionedRegion r,
    long startTime) throws ForceReattemptException {
  if (logger.isTraceEnabled(LogMarker.DM)) {
    logger.trace(LogMarker.DM, "GetMessage operateOnRegion: {}", r.getFullPath());
  }
  PartitionedRegionDataStore ds = r.getDataStore();
  if (this.getTXUniqId() != TXManagerImpl.NOTX) {
    assert r.getDataView() instanceof TXStateProxy;
  }
  RawValue valueBytes;
  Object val = null;
  try {
    if (ds != null) {
      VersionTagHolder event = new VersionTagHolder();
      try {
        KeyInfo keyInfo = r.getKeyInfo(key, cbArg);
        boolean lockEntry = forceUseOfPRExecutor || isDirectAck();
        val = r.getDataView().getSerializedValue(r, keyInfo, !lockEntry, this.context, event,
            returnTombstones);
        if (val == BucketRegion.REQUIRES_ENTRY_LOCK) {
          Assert.assertTrue(!lockEntry);
          this.forceUseOfPRExecutor = true;
          if (logger.isDebugEnabled()) {
            logger.debug("Rescheduling GetMessage due to possible cache-miss");
          }
          schedule(dm);
          return false;
        }
        valueBytes = val instanceof RawValue ? (RawValue) val : new RawValue(val);
      } catch (DistributedSystemDisconnectedException sde) {
        sendReply(getSender(), this.processorId, dm,
            new ReplyException(new ForceReattemptException(
                LocalizedStrings.GetMessage_OPERATION_GOT_INTERRUPTED_DUE_TO_SHUTDOWN_IN_PROGRESS_ON_REMOTE_VM
                    .toLocalizedString(), sde)),
            r, startTime);
        return false;
      } catch (PrimaryBucketException pbe) {
        sendReply(getSender(), getProcessorId(), dm, new ReplyException(pbe), r, startTime);
        return false;
      } catch (DataLocationException e) {
        sendReply(getSender(), getProcessorId(), dm, new ReplyException(e), r, startTime);
        return false;
      }
      if (logger.isTraceEnabled(LogMarker.DM)) {
        logger.trace(LogMarker.DM,
            "GetMessage sending serialized value {} back via GetReplyMessage using processorId: {}",
            valueBytes, getProcessorId());
      }
      r.getPrStats().endPartitionMessagesProcessing(startTime);
      GetReplyMessage.send(getSender(), getProcessorId(), valueBytes, getReplySender(dm),
          event.getVersionTag());
      // The reply has already been sent; returning false prevents a second response.
      return false;
    } else {
      throw new InternalGemFireError(
          LocalizedStrings.GetMessage_GET_MESSAGE_SENT_TO_WRONG_MEMBER.toLocalizedString());
    }
  } finally {
    OffHeapHelper.release(val);
  }
}
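The subtle part of this method is the BucketRegion.REQUIRES_ENTRY_LOCK sentinel: the first read is attempted without the entry lock, and if the data view reports that a lock is required, the message sets forceUseOfPRExecutor and reschedules itself rather than blocking the current thread. Here is a condensed, hedged sketch of that retry shape; the Store, Scheduler, and readOrReschedule names are illustrative stand-ins, not Geode APIs.

import java.util.concurrent.atomic.AtomicBoolean;

public class SentinelRetrySketch {
  // Stand-in for BucketRegion.REQUIRES_ENTRY_LOCK
  static final Object REQUIRES_ENTRY_LOCK = new Object();

  interface Store { Object read(boolean allowLockFree); }
  interface Scheduler { void schedule(Runnable task); }

  static boolean readOrReschedule(Store store, Scheduler scheduler, boolean forceExecutor) {
    boolean lockEntry = forceExecutor;        // the rescheduled pass takes the entry lock
    Object val = store.read(!lockEntry);      // the first pass is attempted lock-free
    if (val == REQUIRES_ENTRY_LOCK) {
      // Requeue the work; the retry runs with the lock instead of blocking here.
      scheduler.schedule(() -> readOrReschedule(store, scheduler, true));
      return false;                           // no reply has been sent yet
    }
    System.out.println("read succeeded: " + val); // the real code serializes val and replies
    return true;
  }

  public static void main(String[] args) {
    AtomicBoolean firstPass = new AtomicBoolean(true);
    Store store = lockFree -> lockFree && firstPass.getAndSet(false) ? REQUIRES_ENTRY_LOCK : "value";
    readOrReschedule(store, Runnable::run, false); // sentinel on pass one, success on the retry
  }
}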
Use of org.apache.geode.internal.cache.KeyInfo in project geode by apache.
The class FetchEntryMessage, method operateOnPartitionedRegion.
@Override
protected boolean operateOnPartitionedRegion(DistributionManager dm, PartitionedRegion r,
    long startTime) throws ForceReattemptException {
  // FetchEntryMessage is used in refreshing client caches during interest list recovery,
  // so don't be too verbose or hydra tasks may time out
  PartitionedRegionDataStore ds = r.getDataStore();
  EntrySnapshot val;
  if (ds != null) {
    try {
      KeyInfo keyInfo = r.getKeyInfo(key);
      val = (EntrySnapshot) r.getDataView().getEntryOnRemote(keyInfo, r, true);
      r.getPrStats().endPartitionMessagesProcessing(startTime);
      FetchEntryReplyMessage.send(getSender(), getProcessorId(), val, dm, null);
    } catch (TransactionException tex) {
      FetchEntryReplyMessage.send(getSender(), getProcessorId(), null, dm, new ReplyException(tex));
    } catch (PRLocallyDestroyedException pde) {
      FetchEntryReplyMessage.send(getSender(), getProcessorId(), null, dm,
          new ReplyException(new ForceReattemptException(
              LocalizedStrings.FetchEntryMessage_ENCOUNTERED_PRLOCALLYDESTROYED.toLocalizedString(),
              pde)));
    } catch (EntryNotFoundException enfe) {
      FetchEntryReplyMessage.send(getSender(), getProcessorId(), null, dm, new ReplyException(
          LocalizedStrings.FetchEntryMessage_ENTRY_NOT_FOUND.toLocalizedString(), enfe));
    } catch (PrimaryBucketException pbe) {
      FetchEntryReplyMessage.send(getSender(), getProcessorId(), null, dm, new ReplyException(pbe));
    } catch (ForceReattemptException fre) {
      fre.checkKey(key);
      // Slightly odd -- we're marshalling the retry to the peer on another host...
      FetchEntryReplyMessage.send(getSender(), getProcessorId(), null, dm, new ReplyException(fre));
    } catch (DataLocationException e) {
      FetchEntryReplyMessage.send(getSender(), getProcessorId(), null, dm, new ReplyException(e));
    }
  } else {
    throw new InternalGemFireError(
        LocalizedStrings.FetchEntryMessage_FETCHENTRYMESSAGE_MESSAGE_SENT_TO_WRONG_MEMBER
            .toLocalizedString());
  }
  // The reply has already been sent above; returning false prevents a second response.
  return false;
}
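Notice that every failure path above still sends exactly one FetchEntryReplyMessage, with the specific cause wrapped in a ReplyException, so the waiting processor on the sender is always released. Below is a hedged sketch of that "always reply once" funnel, using plain Java stand-ins (Replier, fetchAndReply) rather than the Geode messaging types.

import java.util.concurrent.Callable;

public class ReplyFunnelSketch {
  // Stand-in for FetchEntryReplyMessage.send(...): a value on success, a cause on failure.
  interface Replier { void reply(Object value, Exception cause); }

  static void fetchAndReply(Callable<Object> fetch, Replier replier) {
    try {
      replier.reply(fetch.call(), null);  // success: ship the entry, no exception
    } catch (Exception e) {
      replier.reply(null, e);             // any failure: ship the cause instead
    }
  }

  public static void main(String[] args) {
    Replier replier = (value, cause) ->
        System.out.println(cause == null ? "value: " + value : "error: " + cause.getMessage());
    fetchAndReply(() -> "entry", replier);                                      // value: entry
    fetchAndReply(() -> { throw new IllegalStateException("gone"); }, replier); // error: gone
  }
}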
Use of org.apache.geode.internal.cache.KeyInfo in project geode by apache.
The class ParallelQueueRemovalMessageJUnitTest, method validateDestroyKeyFromBucketQueueInUninitializedBucketRegionQueue.
@Ignore
@Test
public void validateDestroyKeyFromBucketQueueInUninitializedBucketRegionQueue() throws Exception {
  // Validate initial BucketRegionQueue state
  assertEquals(0, this.bucketRegionQueue.size());
  assertFalse(this.bucketRegionQueue.isInitialized());

  // Add an event to the BucketRegionQueue and verify BucketRegionQueue state
  this.bucketRegionQueueHelper.addEvent(KEY);
  assertEquals(1, this.bucketRegionQueue.size());

  // Create and process a ParallelQueueRemovalMessage (causes the value of the entry to be set
  // to DESTROYED)
  when(this.queueRegion.getKeyInfo(KEY, null, null)).thenReturn(new KeyInfo(KEY, null, null));
  createAndProcessParallelQueueRemovalMessage();

  // Clean up destroyed tokens and validate BucketRegionQueue
  this.bucketRegionQueueHelper.cleanUpDestroyedTokensAndMarkGIIComplete();
  assertEquals(0, this.bucketRegionQueue.size());
}
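The sequencing in both tests matters: while the bucket is uninitialized, processing the removal message only overwrites the entry's value with a DESTROYED token, and the physical removal happens in the later cleanup pass that marks GII complete. Here is a hedged sketch of that token-then-sweep idea using a plain map; all names here are illustrative, not the Geode implementation.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

public class DestroyTokenSketch {
  static final Object DESTROYED = new Object(); // stand-in for the DESTROYED token

  public static void main(String[] args) {
    Map<String, Object> queue = new ConcurrentHashMap<>();
    queue.put("event-key", "event");

    // While uninitialized, a removal only marks the entry destroyed...
    queue.replace("event-key", DESTROYED);

    // ...and the cleanup pass at GII completion physically removes the tokens.
    queue.values().removeIf(v -> v == DESTROYED);
    System.out.println(queue.size()); // 0, matching the tests' final assertion
  }
}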
Use of org.apache.geode.internal.cache.KeyInfo in project geode by apache.
The class Bug38741DUnitTest, method testPartitionedRegionAndCopyOnRead.
/**
 * Test to ensure that a PartitionedRegion doesn't make more than the expected number of copies
 * when copy-on-read is set to true
 */
@Test
public void testPartitionedRegionAndCopyOnRead() throws Exception {
  final Host h = Host.getHost(0);
  final VM accessor = h.getVM(2);
  final VM datastore = h.getVM(3);
  final String rName = getUniqueName();
  final String k1 = "k1";

  datastore.invoke(new CacheSerializableRunnable("Create PR DataStore") {
    public void run2() throws CacheException {
      AttributesFactory factory = new AttributesFactory();
      factory.setPartitionAttributes(
          new PartitionAttributesFactory().setRedundantCopies(0).create());
      createRootRegion(rName, factory.create());
    }
  });

  accessor.invoke(new CacheSerializableRunnable("Create PR Accessor and put new value") {
    public void run2() throws CacheException {
      AttributesFactory factory = new AttributesFactory();
      factory.setPartitionAttributes(
          new PartitionAttributesFactory().setLocalMaxMemory(0).setRedundantCopies(0).create());
      Region r = createRootRegion(rName, factory.create());
      SerializationCountingValue val = new SerializationCountingValue();
      r.put(k1, val);
      // First put to a bucket will serialize once to determine the size of the value
      // to know how much extra space the new bucket with the new entry will consume
      // and serialize again to send the bytes
      assertEquals(2, val.count.get());
      // A put to an already created bucket should only be serialized once
      val = new SerializationCountingValue();
      r.put(k1, val);
      assertEquals(1, val.count.get());
    }
  });

  datastore.invoke(new CacheSerializableRunnable("assert datastore entry serialization count") {
    public void run2() throws CacheException {
      PartitionedRegion pr = (PartitionedRegion) getRootRegion(rName);
      // Visit the one bucket (since there is only one value in the entire PR)
      // to directly copy the entry bytes and assert the serialization count.
      // All this extra work is to assure the serialization count does not increase
      // (by de-serializing the value stored in the map, which would then have to be
      // re-serialized).
      pr.getDataStore().visitBuckets(new BucketVisitor() {
        public void visit(Integer bucketId, Region r) {
          BucketRegion br = (BucketRegion) r;
          try {
            KeyInfo keyInfo = new KeyInfo(k1, null, bucketId);
            RawValue rv = br.getSerialized(keyInfo, false, false, null, null, false);
            Object val = rv.getRawValue();
            assertTrue(val instanceof CachedDeserializable);
            CachedDeserializable cd = (CachedDeserializable) val;
            SerializationCountingValue scv =
                (SerializationCountingValue) cd.getDeserializedForReading();
            assertEquals(1, scv.count.get());
          } catch (IOException fail) {
            Assert.fail("Unexpected IOException", fail);
          }
        }
      });
    }
  });

  accessor.invoke(new CacheSerializableRunnable("assert accessor entry serialization count") {
    public void run2() throws CacheException {
      Region r = getRootRegion(rName);
      SerializationCountingValue v1 = (SerializationCountingValue) r.get(k1);
      // The counter was incremented once to send the data to the datastore
      assertEquals(1, v1.count.get());
      getCache().setCopyOnRead(true);
      // Once to send the data to the datastore; no extra serialization is needed
      // to make the copy, since the value arrives from the datastore already serialized.
      SerializationCountingValue v2 = (SerializationCountingValue) r.get(k1);
      assertEquals(1, v2.count.get());
      assertTrue(v1 != v2);
    }
  });

  datastore.invoke(new CacheSerializableRunnable("assert value serialization") {
    public void run2() throws CacheException {
      Region r = getRootRegion(rName);
      SerializationCountingValue v1 = (SerializationCountingValue) r.get(k1);
      // Once to send the value from the accessor to the data store
      assertEquals(1, v1.count.get());
      getCache().setCopyOnRead(true);
      // Once to send the value from the accessor to the data store,
      // once to make a local copy
      SerializationCountingValue v2 = (SerializationCountingValue) r.get(k1);
      assertEquals(2, v2.count.get());
      assertTrue(v1 != v2);
    }
  });
}
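All of the count assertions above hinge on SerializationCountingValue, whose definition isn't shown on this page. A plausible minimal sketch follows, assuming it simply bumps an AtomicInteger every time toData runs; the real class lives in Geode's test tree and may differ in detail.

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.geode.DataSerializable;

public class SerializationCountingValue implements DataSerializable {
  // The tests read this directly via val.count.get().
  public final AtomicInteger count = new AtomicInteger();

  public void toData(DataOutput out) throws IOException {
    out.writeInt(count.addAndGet(1)); // every serialization increments the counter
  }

  public void fromData(DataInput in) throws IOException, ClassNotFoundException {
    count.set(in.readInt()); // a deserialized copy carries the sender's count
  }
}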