Use of org.junit.rules.Timeout in project pravega by pravega.
From the class HashTableSegmentLayoutTests, method testRecovery.
/**
* Tests the ability to resume operations after a recovery event. Scenarios include:
* - Index is up-to-date ({@link TableAttributes#INDEX_OFFSET} equals Segment.Length).
* - Index is not up-to-date ({@link TableAttributes#INDEX_OFFSET} is less than Segment.Length).
*/
@Test
public void testRecovery() throws Exception {
// Generate a set of TestEntryData (List<TableEntry>, ExpectedResults).
// Process each TestEntryData in turn, re-creating the Extension after each one.
// Verify gets are blocked on indexing; then index, verify they are unblocked, re-create the Extension, and verify again.
val recoveryConfig = TableExtensionConfig.builder().with(TableExtensionConfig.MAX_TAIL_CACHE_PREINDEX_BATCH_SIZE, (MAX_KEY_LENGTH + MAX_VALUE_LENGTH) * 11).build();
@Cleanup val context = new TableContext(recoveryConfig, executorService());
// Create the Segment.
context.ext.createSegment(SEGMENT_NAME, SegmentType.TABLE_SEGMENT_HASH, TIMEOUT).join();
// Close the initial extension, as we don't need it anymore.
context.ext.close();
// Generate test data (in update & remove batches).
val data = generateTestData(context);
// Process each such batch in turn.
for (int i = 0; i < data.size(); i++) {
val current = data.get(i);
// Apply the current batch of updates and removals; the net effect of this is writing the data to the Segment.
try (val ext = context.createExtension()) {
val toUpdate = current.toUpdate.entrySet().stream().map(e -> toUnconditionalTableEntry(e.getKey(), e.getValue(), 0)).collect(Collectors.toList());
ext.put(SEGMENT_NAME, toUpdate, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
val toRemove = current.toRemove.stream().map(k -> toUnconditionalKey(k, 0)).collect(Collectors.toList());
ext.remove(SEGMENT_NAME, toRemove, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
}
// Create a new instance of the extension (which simulates a recovery) and verify it exhibits the correct behavior.
try (val ext = context.createExtension()) {
// We should have unindexed data.
long lastIndexedOffset = context.segment().getInfo().getAttributes().get(TableAttributes.INDEX_OFFSET);
long segmentLength = context.segment().getInfo().getLength();
AssertExtensions.assertGreaterThan("Expected some unindexed data.", lastIndexedOffset, segmentLength);
// This ensures that the last iteration uses the processor.
boolean useProcessor = i % 2 == 0;
// Verify get requests are blocked.
val key1 = current.expectedEntries.keySet().stream().findFirst().orElse(null);
val get1 = ext.get(SEGMENT_NAME, Collections.singletonList(key1), TIMEOUT);
val getResult1 = get1.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
Assert.assertEquals("Unexpected completion result for recovered get.", current.expectedEntries.get(key1), getResult1.get(0).getValue());
if (useProcessor) {
// Create, populate, and flush the processor.
@Cleanup val processor = (WriterTableProcessor) ext.createWriterSegmentProcessors(context.segment().getMetadata()).stream().findFirst().orElse(null);
addToProcessor(lastIndexedOffset, (int) (segmentLength - lastIndexedOffset), processor);
processor.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
Assert.assertFalse("Unexpected result from WriterTableProcessor.mustFlush() after flushing.", processor.mustFlush());
}
}
}
// Verify the final result. We create yet another extension here and purposefully do not instantiate any writer processors;
// we want to make sure the data are accessible even without one being created (since the indexing is all caught up).
@Cleanup val ext2 = context.createExtension();
check(data.get(data.size() - 1).expectedEntries, Collections.emptyList(), ext2);
}
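The recovery scenarios in the Javadoc above reduce to one comparison: the TableAttributes.INDEX_OFFSET attribute versus the Segment's length. A minimal sketch of that check, using only the accessors the test itself calls (getInfo(), getAttributes(), getLength()); the helper name is illustrative, not part of pravega's API:
// Sketch: the "is the index caught up?" condition from the Javadoc above.
private static boolean isIndexCaughtUp(SegmentProperties info) {
    // The index is up-to-date once the last indexed offset has reached the Segment's length.
    Long lastIndexedOffset = info.getAttributes().get(TableAttributes.INDEX_OFFSET);
    return lastIndexedOffset != null && lastIndexedOffset >= info.getLength();
}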
Use of org.junit.rules.Timeout in project pravega by pravega.
From the class ContainerEventProcessorTests, method testInitializationException.
/**
* Check that if the creation of the EventProcessor fails, the future is completed exceptionally.
*
* @throws Exception
*/
@Test(timeout = 10000)
public void testInitializationException() throws Exception {
AtomicBoolean induceFailure = new AtomicBoolean(true);
Function<String, CompletableFuture<DirectSegmentAccess>> failingSegmentSupplier = s -> induceFailure.getAndSet(!induceFailure.get()) ? CompletableFuture.failedFuture(new IntentionalException()) : CompletableFuture.completedFuture(new SegmentMock(this.executorService()));
@Cleanup ContainerEventProcessorImpl eventProcessorService = new ContainerEventProcessorImpl(0, failingSegmentSupplier, ITERATION_DELAY, CONTAINER_OPERATION_TIMEOUT, this.executorService());
int maxItemsProcessed = 10;
int maxOutstandingBytes = 4 * 1024 * 1024;
int truncationDataSize = 500;
ContainerEventProcessor.EventProcessorConfig config = new ContainerEventProcessor.EventProcessorConfig(maxItemsProcessed, maxOutstandingBytes, truncationDataSize);
// Verify that if the creation of the EventProcessor fails, the future completes exceptionally.
AssertExtensions.assertFutureThrows("Expected future to complete exceptionally with IntentionalException", eventProcessorService.forConsumer("testExceptionForConsumer", l -> null, config), ex -> ex instanceof IntentionalException);
// If the call has failed, the future for that EventProcessor should have been removed from the map.
Assert.assertNull(eventProcessorService.getEventProcessorMap().get("testExceptionForConsumer"));
// The next call is expected to succeed, so the future should be in the map when this call completes.
Assert.assertNotNull(eventProcessorService.forConsumer("testExceptionForConsumer", l -> null, config).join());
Assert.assertNotNull(eventProcessorService.getEventProcessorMap().get("testExceptionForConsumer"));
AssertExtensions.assertFutureThrows("Expected future to complete exceptionally with IntentionalException", eventProcessorService.forDurableQueue("testExceptionForDurableQueue"), ex -> ex instanceof IntentionalException);
Assert.assertNull(eventProcessorService.getEventProcessorMap().get("testExceptionForDurableQueue"));
Assert.assertNotNull(eventProcessorService.forDurableQueue("testExceptionForDurableQueue").join());
Assert.assertNotNull(eventProcessorService.getEventProcessorMap().get("testExceptionForDurableQueue"));
}
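The trick above is a segment supplier that fails on every other invocation, letting one test cover both the failure path and the subsequent successful retry. The same pattern in isolation (a sketch; the names are illustrative):
AtomicBoolean failNext = new AtomicBoolean(true);
Supplier<CompletableFuture<String>> flakySupplier = () ->
        // getAndSet flips the flag, so successive calls alternate between failure and success.
        failNext.getAndSet(!failNext.get())
                ? CompletableFuture.failedFuture(new RuntimeException("induced failure"))
                : CompletableFuture.completedFuture("ok");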
Use of org.junit.rules.Timeout in project pravega by pravega.
From the class ContainerEventProcessorTests, method testAppendWithFailingSegment.
/**
* Check the behavior of the EventProcessor when there are failures when adding events to the internal Segment.
*
* @throws Exception
*/
@Test(timeout = 10000)
public void testAppendWithFailingSegment() throws Exception {
DirectSegmentAccess faultySegment = mock(SegmentMock.class);
when(faultySegment.append(any(), any(), any())).thenThrow(NullPointerException.class);
SegmentMetadata mockMetadata = mock(SegmentMetadata.class);
when(mockMetadata.getLength()).thenReturn(0L);
when(faultySegment.getInfo()).thenReturn(mockMetadata);
Function<String, CompletableFuture<DirectSegmentAccess>> faultySegmentSupplier = s -> CompletableFuture.completedFuture(faultySegment);
@Cleanup ContainerEventProcessor eventProcessorService = new ContainerEventProcessorImpl(0, faultySegmentSupplier, ITERATION_DELAY, CONTAINER_OPERATION_TIMEOUT, this.executorService());
int maxItemsProcessed = 10;
int maxOutstandingBytes = 4 * 1024 * 1024;
int truncationDataSize = 500;
ContainerEventProcessor.EventProcessorConfig config = new ContainerEventProcessor.EventProcessorConfig(maxItemsProcessed, maxOutstandingBytes, truncationDataSize);
Function<List<BufferView>, CompletableFuture<Void>> doNothing = l -> null;
@Cleanup ContainerEventProcessor.EventProcessor processor = eventProcessorService.forConsumer("testSegmentMax", doNothing, config).get(TIMEOUT_FUTURE.toSeconds(), TimeUnit.SECONDS);
// Verify that the client gets the exception if there is some issue on add().
BufferView event = new ByteArraySegment("Test".getBytes());
AssertExtensions.assertThrows(NullPointerException.class, () -> processor.add(event, TIMEOUT_FUTURE).join());
}
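A note on the final assertion: CompletableFuture.join() normally wraps failures in a CompletionException, so asserting NullPointerException directly relies on the assertion helper matching the underlying cause (which pravega's AssertExtensions appears to do). A hedged sketch of that unwrapping for plain-JUnit use; the helper is illustrative:
// Illustrative helper: walk past the async wrappers to the root cause.
static Throwable unwrapAsyncException(Throwable t) {
    while ((t instanceof CompletionException || t instanceof ExecutionException) && t.getCause() != null) {
        t = t.getCause();
    }
    return t;
}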
Use of org.junit.rules.Timeout in project pravega by pravega.
From the class ContainerEventProcessorTests, method testEventProcessorClose.
/**
* Test closing the EventProcessor.
*
* @throws Exception
*/
@Test(timeout = 30000)
public void testEventProcessorClose() throws Exception {
@Cleanup ContainerEventProcessor eventProcessorService = new ContainerEventProcessorImpl(0, mockSegmentSupplier(), ITERATION_DELAY, CONTAINER_OPERATION_TIMEOUT, this.executorService());
int maxItemsProcessed = 10;
int maxOutstandingBytes = 4 * 1024 * 1024;
int truncationDataSize = 500;
ContainerEventProcessor.EventProcessorConfig config = new ContainerEventProcessor.EventProcessorConfig(maxItemsProcessed, maxOutstandingBytes, truncationDataSize);
AtomicLong processorResults = new AtomicLong(0);
Function<List<BufferView>, CompletableFuture<Void>> handler = l -> {
processorResults.addAndGet(l.size());
return CompletableFuture.completedFuture(null);
};
@Cleanup ContainerEventProcessor.EventProcessor processor = eventProcessorService.forConsumer("testClose", handler, config).get(TIMEOUT_FUTURE.toSeconds(), TimeUnit.SECONDS);
// Assert that the existing object in the ContainerEventProcessorImpl map is the same as the one we just instantiated.
Assert.assertEquals(processor, eventProcessorService.forConsumer("testClose", handler, config).get(TIMEOUT_FUTURE.toSeconds(), TimeUnit.SECONDS));
// Now, close the EventProcessor object, which should auto-unregister from the map.
processor.close();
// After that, we should see a new object being instantiated in ContainerEventProcessorImpl for the same name.
Assert.assertNotEquals(processor, eventProcessorService.forConsumer("testClose", handler, config).get(TIMEOUT_FUTURE.toSeconds(), TimeUnit.SECONDS));
}
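What this test verifies is the auto-unregister-on-close pattern: the service caches processors by name, and close() removes the cache entry so the next request builds a fresh instance. A minimal sketch of that pattern under this assumption (names are illustrative, not pravega internals):
final Map<String, AutoCloseable> registry = new ConcurrentHashMap<>();

AutoCloseable getOrCreate(String name) {
    // close() removes the entry, so a later getOrCreate(name) yields a new instance.
    return registry.computeIfAbsent(name, n -> () -> registry.remove(n));
}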
Use of org.junit.rules.Timeout in project pravega by pravega.
From the class AsyncStorageWrapperTests, method testConcurrencyConcat.
/**
* Tests the segment-based concurrency when concat is involved. In particular, that a concat() will wait for any pending
* operations on each involved segment and that any subsequent operation on any of those segments will be queued up.
*/
@Test
public void testConcurrencyConcat() throws Exception {
final String segment1 = "Segment1";
final String segment2 = "Segment2";
final BiFunction<String, String, String> joiner = (op, segment) -> op + "|" + segment;
final String createSegment1Key = joiner.apply(TestStorage.CREATE, segment1);
final String createSegment2Key = joiner.apply(TestStorage.CREATE, segment2);
final String concatKey = joiner.apply(TestStorage.CONCAT, segment1 + "|" + segment2);
final String writeSegment1Key = joiner.apply(TestStorage.WRITE, segment1);
final String writeSegment2Key = joiner.apply(TestStorage.WRITE, segment2);
// Create a set of latches that can be used to detect when an operation was invoked and when to release it.
val invoked = new HashMap<String, ReusableLatch>();
val waitOn = new HashMap<String, ReusableLatch>();
invoked.put(createSegment1Key, new ReusableLatch());
invoked.put(createSegment2Key, new ReusableLatch());
invoked.put(concatKey, new ReusableLatch());
invoked.put(writeSegment1Key, new ReusableLatch());
invoked.put(writeSegment2Key, new ReusableLatch());
waitOn.put(createSegment1Key, new ReusableLatch());
waitOn.put(createSegment2Key, new ReusableLatch());
waitOn.put(concatKey, new ReusableLatch());
waitOn.put(writeSegment1Key, new ReusableLatch());
waitOn.put(writeSegment2Key, new ReusableLatch());
val innerStorage = new TestStorage((operation, segment) -> {
invoked.get(joiner.apply(operation, segment)).release();
Exceptions.handleInterrupted(() -> waitOn.get(joiner.apply(operation, segment)).await());
return null;
});
@Cleanup val s = new AsyncStorageWrapper(innerStorage, executorService());
// Issue two Create operations with the two segments and wait for both of them to be running.
val futures = new ArrayList<CompletableFuture<?>>();
futures.add(s.create(segment1, TIMEOUT));
futures.add(s.create(segment2, TIMEOUT));
invoked.get(createSegment1Key).await(LOCK_TIMEOUT_MILLIS);
invoked.get(createSegment2Key).await(LOCK_TIMEOUT_MILLIS);
Assert.assertEquals("Unexpected number of active segments.", 2, s.getSegmentWithOngoingOperationsCount());
// Initiate the concat, complete one of the original operations, and verify the concat did not start.
futures.add(s.concat(InMemoryStorage.newHandle(segment1, false), 0, segment2, TIMEOUT));
waitOn.get(createSegment1Key).release();
AssertExtensions.assertThrows("Concat was invoked while the at least one of the creates was running.", () -> invoked.get(concatKey).await(LOCK_TIMEOUT_MILLIS), ex -> ex instanceof TimeoutException);
// Finish up the "source" create and verify the concat is released.
waitOn.get(createSegment2Key).release();
invoked.get(concatKey).await(TIMEOUT_MILLIS);
// Add more operations after the concat and verify they are queued up (that they haven't started).
futures.add(s.write(InMemoryStorage.newHandle(segment1, false), 0, new ByteArrayInputStream(new byte[0]), 0, TIMEOUT));
futures.add(s.write(InMemoryStorage.newHandle(segment2, false), 0, new ByteArrayInputStream(new byte[0]), 0, TIMEOUT));
AssertExtensions.assertThrows("Write(target) was invoked while concat was running", () -> invoked.get(writeSegment1Key).await(LOCK_TIMEOUT_MILLIS), ex -> ex instanceof TimeoutException);
AssertExtensions.assertThrows("Write(source) was invoked while concat was running", () -> invoked.get(writeSegment2Key).await(LOCK_TIMEOUT_MILLIS), ex -> ex instanceof TimeoutException);
Assert.assertEquals("Unexpected number of active segments.", 2, s.getSegmentWithOngoingOperationsCount());
// Finish up the concat and verify the two writes are released.
waitOn.get(concatKey).release();
invoked.get(writeSegment1Key).await(LOCK_TIMEOUT_MILLIS);
invoked.get(writeSegment2Key).await(LOCK_TIMEOUT_MILLIS);
waitOn.get(writeSegment1Key).release();
waitOn.get(writeSegment2Key).release();
allOf(futures).get(TIMEOUT_MILLIS, TimeUnit.MILLISECONDS);
Assert.assertEquals("Unexpected number of active segments.", 0, s.getSegmentWithOngoingOperationsCount());
}
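The invoked/waitOn latch pair is a reusable recipe for deterministic concurrency tests: one latch signals that an operation has started, the other holds it open until the test allows it to finish. Reduced to its core, using the same ReusableLatch and Exceptions utilities the test already relies on:
ReusableLatch invoked = new ReusableLatch();
ReusableLatch waitOn = new ReusableLatch();

Runnable gatedOperation = () -> {
    invoked.release();                           // tell the test the operation has started
    Exceptions.handleInterrupted(waitOn::await); // block until the test releases waitOn
};
// Test side: invoked.await(timeout) confirms the start; waitOn.release() lets it finish.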