Use of org.infinispan.test.fwk.CheckPoint in project infinispan: class LockingTest, method injectBlockingCommandInterceptor.
private CheckPoint injectBlockingCommandInterceptor(String cacheName) {
   final CheckPoint checkPoint = new CheckPoint();
   TestingUtil.extractInterceptorChain(cache(cacheName)).addInterceptorBefore(new BaseCustomAsyncInterceptor() {
      private final AtomicBoolean first = new AtomicBoolean(false);

      @Override
      public Object visitPutKeyValueCommand(InvocationContext ctx, PutKeyValueCommand command) {
         if (first.compareAndSet(false, true)) {
            checkPoint.trigger("before-block");
            return asyncInvokeNext(ctx, command, checkPoint.future("block", 30, TimeUnit.SECONDS, testExecutor()));
         }
         return invokeNext(ctx, command);
      }
   }, CallInterceptor.class);
   return checkPoint;
}
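A minimal sketch of how a caller might consume the returned CheckPoint; the cache name, key, and value below are hypothetical, and only the trigger/awaitStrict semantics visible above are assumed.

// Hypothetical caller (sketch): block the first put, assert while it is parked, then release it.
CheckPoint checkPoint = injectBlockingCommandInterceptor("test-cache");     // "test-cache" is a made-up cache name
Future<Object> put = fork(() -> cache("test-cache").put("key", "value"));   // first put hits the blocking interceptor
checkPoint.awaitStrict("before-block", 10, TimeUnit.SECONDS);               // wait until the command is parked
assertFalse(put.isDone());                                                  // the write has not completed yet
checkPoint.trigger("block");                                                // completes the future from checkPoint.future("block", ...)
put.get(10, TimeUnit.SECONDS);                                              // the put now finishes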
Use of org.infinispan.test.fwk.CheckPoint in project infinispan: class TxInvalidationLockingTest, method testOptimisticPrepareAcquiresGlobalLock.
public void testOptimisticPrepareAcquiresGlobalLock() throws Exception {
   CheckPoint checkPoint = new CheckPoint();
   Future<Void> tx2Future;
   Cache<Object, Object> cache1 = cache(0, OPTIMISTIC_CACHE);
   tm(cache1).begin();
   EmbeddedTransaction tx1 = null;
   try {
      Object initialValue = cache1.put(KEY, VALUE1);
      assertNull(initialValue);
      tx1 = (EmbeddedTransaction) tm(cache1).getTransaction();
      tx1.runPrepare();

      tx2Future = fork(() -> {
         AdvancedCache<Object, Object> cache2 = advancedCache(1, OPTIMISTIC_CACHE);
         tm(cache2).begin();
         try {
            assertNull(cache2.get(KEY));
            checkPoint.trigger("tx2_read");
            cache2.put(KEY, VALUE2);
         } finally {
            tm(cache2).commit();
         }
      });

      checkPoint.awaitStrict("tx2_read", 10, TimeUnit.SECONDS);
      Thread.sleep(10);
      assertFalse(tx2Future.isDone());
   } finally {
      if (tx1 != null) {
         tx1.runCommit(false);
      }
   }

   // No WriteSkewException
   tx2Future.get(30, TimeUnit.SECONDS);
   assertEquals(VALUE2, cache1.get(KEY));
}
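The two-phase EmbeddedTransaction helpers used above can be isolated into a small sketch; this assumes runPrepare() only prepares (acquiring locks) and that runCommit(false), as passed by the test above, then completes the commit rather than rolling back.

// Sketch: split prepare and commit so concurrent operations can be asserted in between.
tm(cache1).begin();
cache1.put(KEY, VALUE1);
EmbeddedTransaction tx = (EmbeddedTransaction) tm(cache1).getTransaction();
tx.runPrepare();        // prepare only: locks are held, nothing is committed yet
// ... assert that a concurrent writer blocks while the prepared transaction holds the lock ...
tx.runCommit(false);    // finish the transaction; false is what the test above passes to commit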
Use of org.infinispan.test.fwk.CheckPoint in project infinispan: class DistributedStreamIteratorTest, method verifyNodeLeavesAfterSendingBackSomeData.
/**
 * Verify proper behavior when a node dies after sending a batch back to the requestor.
 */
@Test
public void verifyNodeLeavesAfterSendingBackSomeData() throws TimeoutException, InterruptedException, ExecutionException {
   Cache<Object, String> cache0 = cache(0, CACHE_NAME);
   Cache<Object, String> cache1 = cache(1, CACHE_NAME);
   Map<Object, String> values = new HashMap<>();
   int chunkSize = cache0.getCacheConfiguration().clustering().stateTransfer().chunkSize();
   // Now insert 2 more values than the chunk size into the node we will kill
   for (int i = 0; i < chunkSize + 2; ++i) {
      MagicKey key = new MagicKey(cache1);
      cache1.put(key, key.toString());
      values.put(key, key.toString());
   }

   CheckPoint checkPoint = new CheckPoint();
   // Let the first request come through fine
   checkPoint.trigger(Mocks.BEFORE_RELEASE);
   waitUntilSendingResponse(cache1, checkPoint);

   final BlockingQueue<Map.Entry<Object, String>> returnQueue = new LinkedBlockingQueue<>();
   Future<Void> future = fork(() -> {
      Iterator<Map.Entry<Object, String>> iter = cache0.entrySet().stream().iterator();
      while (iter.hasNext()) {
         Map.Entry<Object, String> entry = iter.next();
         returnQueue.add(entry);
      }
      return null;
   });

   // Now wait for them to send back first results
   checkPoint.awaitStrict(Mocks.AFTER_INVOCATION, 10, TimeUnit.SECONDS);
   checkPoint.trigger(Mocks.AFTER_RELEASE);

   // We should get a value now, note all values are currently residing on cache1 as primary
   Map.Entry<Object, String> value = returnQueue.poll(10, TimeUnit.SECONDS);

   // Now kill the cache - we should recover
   killMember(1, CACHE_NAME);

   future.get(10, TimeUnit.SECONDS);

   for (Map.Entry<Object, String> entry : values.entrySet()) {
      assertTrue("Entry wasn't found:" + entry, returnQueue.contains(entry) || entry.equals(value));
   }
}
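The Mocks.* checkpoint names above follow a four-phase handshake: the blocked component signals the *_INVOCATION events and waits for the matching *_RELEASE events. A minimal sketch of the controlling test thread, assuming those semantics, might look like:

CheckPoint checkPoint = new CheckPoint();
checkPoint.trigger(Mocks.BEFORE_RELEASE);                               // allow the call to start without waiting
// ... install the blocking hook here, e.g. waitUntilSendingResponse(cache1, checkPoint) as above ...
checkPoint.awaitStrict(Mocks.AFTER_INVOCATION, 10, TimeUnit.SECONDS);   // the call produced its result but is held
// ... make assertions or change topology while the response is parked ...
checkPoint.trigger(Mocks.AFTER_RELEASE);                                // let the held response go out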
Use of org.infinispan.test.fwk.CheckPoint in project infinispan: class DistributedStreamIteratorTest, method testIterationDuringInitialTransfer.
@Test
public void testIterationDuringInitialTransfer() throws Exception {
   Map<Object, String> values = putValueInEachCache(3);

   // Go back to 2 caches, because we assign all 3 segments to the first 3 nodes
   // and we need the joiner to request some state in order to block it
   killMember(2, CACHE_NAME);

   Cache<Object, String> cache0 = cache(0, CACHE_NAME);
   CheckPoint checkPoint = new CheckPoint();
   checkPoint.triggerForever(Mocks.AFTER_RELEASE);
   blockStateTransfer(cache0, checkPoint);

   EmbeddedCacheManager joinerManager = addClusterEnabledCacheManager(sci, new ConfigurationBuilder(), new TransportFlags().withFD(true));
   ConfigurationBuilder builderNoAwaitInitialTransfer = new ConfigurationBuilder();
   builderNoAwaitInitialTransfer.read(builderUsed.build());
   builderNoAwaitInitialTransfer.clustering().stateTransfer().awaitInitialTransfer(false);
   joinerManager.defineConfiguration(CACHE_NAME, builderNoAwaitInitialTransfer.build());

   Cache<String, String> joinerCache = joinerManager.getCache(CACHE_NAME, true);

   // Not required, but it should make the logs clearer
   checkPoint.awaitStrict(Mocks.BEFORE_INVOCATION, 10, TimeUnit.SECONDS);

   Set<String> iteratorValues = new HashSet<>();
   try {
      Iterator<String> iter = joinerCache.entrySet().stream().map(Map.Entry::getValue).iterator();
      while (iter.hasNext()) {
         String value = iter.next();
         iteratorValues.add(value);
      }
   } finally {
      checkPoint.triggerForever(Mocks.BEFORE_RELEASE);
   }

   for (Map.Entry<Object, String> entry : values.entrySet()) {
      assertTrue("Entry wasn't found:" + entry, iteratorValues.contains(entry.getValue()));
   }
}
Use of org.infinispan.test.fwk.CheckPoint in project infinispan: class DistributedStreamIteratorTest, method waitUntilProcessingResults.
@Test
public void waitUntilProcessingResults() throws TimeoutException, InterruptedException, ExecutionException {
   Cache<Object, String> cache0 = cache(0, CACHE_NAME);
   Cache<Object, String> cache1 = cache(1, CACHE_NAME);
   Map<Object, String> values = new HashMap<>();
   for (int i = 0; i < 9; ++i) {
      MagicKey key = new MagicKey(cache1);
      cache1.put(key, key.toString());
      values.put(key, key.toString());
   }

   CheckPoint checkPoint = new CheckPoint();
   checkPoint.triggerForever(Mocks.AFTER_RELEASE);
   ClusterPublisherManager<Object, String> spy = Mocks.replaceComponentWithSpy(cache0, ClusterPublisherManager.class);
   doAnswer(invocation -> {
      SegmentPublisherSupplier<?> result = (SegmentPublisherSupplier<?>) invocation.callRealMethod();
      return Mocks.blockingPublisher(result, checkPoint);
   }).when(spy).entryPublisher(any(), any(), any(), anyLong(), any(), anyInt(), any());

   final BlockingQueue<Map.Entry<Object, String>> returnQueue = new LinkedBlockingQueue<>();
   Future<Void> future = fork(() -> {
      Iterator<Map.Entry<Object, String>> iter = cache0.entrySet().stream().iterator();
      while (iter.hasNext()) {
         Map.Entry<Object, String> entry = iter.next();
         returnQueue.add(entry);
      }
      return null;
   });

   // Now wait for them to send back first results but don't let them process
   checkPoint.awaitStrict(Mocks.BEFORE_INVOCATION, 10, TimeUnit.SECONDS);

   // Now let them process the results
   checkPoint.triggerForever(Mocks.BEFORE_RELEASE);

   // Now kill the cache - we should recover and get appropriate values
   killMember(1, CACHE_NAME);

   future.get(10, TimeUnit.SECONDS);

   KeyPartitioner keyPartitioner = TestingUtil.extractComponent(cache0, KeyPartitioner.class);
   Map<Integer, Set<Map.Entry<Object, String>>> expected = generateEntriesPerSegment(keyPartitioner, values.entrySet());
   Map<Integer, Set<Map.Entry<Object, String>>> answer = generateEntriesPerSegment(keyPartitioner, returnQueue);

   for (Map.Entry<Integer, Set<Map.Entry<Object, String>>> entry : expected.entrySet()) {
      Integer segment = entry.getKey();
      Set<Map.Entry<Object, String>> answerForSegment = answer.get(segment);
      if (answerForSegment != null) {
         for (Map.Entry<Object, String> exp : entry.getValue()) {
            if (!answerForSegment.contains(exp)) {
               log.errorf("Segment %d, missing %s", segment, exp);
            }
         }
         for (Map.Entry<Object, String> ans : answerForSegment) {
            if (!entry.getValue().contains(ans)) {
               log.errorf("Segment %d, extra %s", segment, ans);
            }
         }
         assertEquals(entry.getValue().size(), answerForSegment.size());
      }
      assertEquals("Segment " + segment + " had a mismatch", entry.getValue(), answerForSegment);
   }
}
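generateEntriesPerSegment is a test helper not shown in this excerpt; assuming it simply groups entries by the segment that owns their key via KeyPartitioner.getSegment, a sketch could look like this (java.util.AbstractMap.SimpleEntry is used here only for illustration):

// Hypothetical reconstruction of the helper: group entries by owning segment.
private Map<Integer, Set<Map.Entry<Object, String>>> generateEntriesPerSegment(KeyPartitioner keyPartitioner,
      Iterable<Map.Entry<Object, String>> entries) {
   Map<Integer, Set<Map.Entry<Object, String>>> entriesPerSegment = new HashMap<>();
   for (Map.Entry<Object, String> entry : entries) {
      int segment = keyPartitioner.getSegment(entry.getKey());
      entriesPerSegment.computeIfAbsent(segment, s -> new HashSet<>())
            .add(new AbstractMap.SimpleEntry<>(entry.getKey(), entry.getValue()));
   }
   return entriesPerSegment;
}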