Use of org.junit.rules.Timeout in project pravega by pravega.
The class StreamSegmentContainerTests, method testFutureReads.
/**
* Tests the ability to perform future (tail) reads. Scenarios tested include:
* * Regular appends
* * Segment sealing
* * Transaction merging.
*/
@Test
public void testFutureReads() throws Exception {
final int nonSealReadLimit = 100;
@Cleanup TestContext context = createContext();
context.container.startAsync().awaitRunning();
// 1. Create the StreamSegments.
ArrayList<String> segmentNames = createSegments(context);
HashMap<String, ArrayList<String>> transactionsBySegment = createTransactions(segmentNames, context);
activateAllSegments(segmentNames, context);
transactionsBySegment.values().forEach(s -> activateAllSegments(s, context));
HashMap<String, ReadResult> readsBySegment = new HashMap<>();
ArrayList<AsyncReadResultProcessor> readProcessors = new ArrayList<>();
HashSet<String> segmentsToSeal = new HashSet<>();
HashMap<String, ByteArrayOutputStream> readContents = new HashMap<>();
HashMap<String, TestReadResultHandler> entryHandlers = new HashMap<>();
// 2. Set up tail reads. The first half of the segments read up to Integer.MAX_VALUE bytes (we will seal these later, which ends those reads); the other half read up to nonSealReadLimit bytes (those reads should stop upon reaching the limit).
for (int i = 0; i < segmentNames.size(); i++) {
String segmentName = segmentNames.get(i);
ByteArrayOutputStream readContentsStream = new ByteArrayOutputStream();
readContents.put(segmentName, readContentsStream);
ReadResult readResult;
if (i < segmentNames.size() / 2) {
// We're going to seal this one at one point.
segmentsToSeal.add(segmentName);
readResult = context.container.read(segmentName, 0, Integer.MAX_VALUE, TIMEOUT).join();
} else {
// Just a regular one, nothing special.
readResult = context.container.read(segmentName, 0, nonSealReadLimit, TIMEOUT).join();
}
// The Read callback is only accumulating data in this test; we will then compare it against the real data.
TestReadResultHandler entryHandler = new TestReadResultHandler(readContentsStream, TIMEOUT);
entryHandlers.put(segmentName, entryHandler);
readsBySegment.put(segmentName, readResult);
readProcessors.add(AsyncReadResultProcessor.process(readResult, entryHandler, executorService()));
}
// 3. Add some appends.
HashMap<String, Long> lengths = new HashMap<>();
HashMap<String, ByteArrayOutputStream> segmentContents = new HashMap<>();
appendToParentsAndTransactions(segmentNames, transactionsBySegment, lengths, segmentContents, context);
// 4. Merge all the Transactions.
Futures.allOf(mergeTransactions(transactionsBySegment, lengths, segmentContents, context, false)).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
// 5. Add more appends (to the parent segments).
ArrayList<CompletableFuture<Void>> operationFutures = new ArrayList<>();
for (int i = 0; i < 5; i++) {
for (String segmentName : segmentNames) {
RefCountByteArraySegment appendData = getAppendData(segmentName, APPENDS_PER_SEGMENT + i);
operationFutures.add(Futures.toVoid(context.container.append(segmentName, appendData, null, TIMEOUT)));
lengths.put(segmentName, lengths.getOrDefault(segmentName, 0L) + appendData.getLength());
recordAppend(segmentName, appendData, segmentContents, null);
}
}
// Seal the segments selected earlier; this should end their (unbounded) tail reads.
segmentsToSeal.forEach(segmentName -> operationFutures.add(Futures.toVoid(context.container.sealStreamSegment(segmentName, TIMEOUT))));
Futures.allOf(operationFutures).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
// Now wait for all the reads to complete, and verify their results against the expected output.
Futures.allOf(entryHandlers.values().stream().map(h -> h.getCompleted()).collect(Collectors.toList())).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
readProcessors.forEach(AsyncReadResultProcessor::close);
// Check to see if any errors got thrown (and caught) during the reading process.
for (Map.Entry<String, TestReadResultHandler> e : entryHandlers.entrySet()) {
Throwable err = e.getValue().getError().get();
if (err != null) {
// The next check (see below) will verify whether the segments were properly read.
if (!(err instanceof StreamSegmentSealedException && segmentsToSeal.contains(e.getKey()))) {
Assert.fail("Unexpected error happened while processing Segment " + e.getKey() + ": " + e.getValue().getError().get());
}
}
}
// Check that all the ReadResults are closed.
for (Map.Entry<String, ReadResult> e : readsBySegment.entrySet()) {
Assert.assertTrue("Read result is not closed for segment " + e.getKey(), e.getValue().isClosed());
}
// Compare, byte-by-byte, the outcome of the tail reads.
Assert.assertEquals("Unexpected number of segments were read.", segmentContents.size(), readContents.size());
for (String segmentName : segmentNames) {
boolean isSealed = segmentsToSeal.contains(segmentName);
byte[] expectedData = segmentContents.get(segmentName).toByteArray();
byte[] actualData = readContents.get(segmentName).toByteArray();
int expectedLength = isSealed ? (int) (long) lengths.get(segmentName) : nonSealReadLimit;
Assert.assertEquals("Unexpected read length for segment " + segmentName, expectedLength, actualData.length);
AssertExtensions.assertArrayEquals("Unexpected read contents for segment " + segmentName, expectedData, 0, actualData, 0, actualData.length);
}
// 6. Wait for the Writer to move the data to Storage, then verify the Storage contents.
waitForSegmentsInStorage(segmentNames, context).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
checkStorage(segmentContents, lengths, context);
}
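This page is indexed on org.junit.rules.Timeout, but the snippets above only show the per-operation TIMEOUT Duration constant. For reference, the class-level timeout rule in a JUnit 4 test class like this one typically looks like the sketch below; the class name, field name, and concrete values are illustrative assumptions, not copied from the Pravega sources.

import java.time.Duration;
import java.util.concurrent.TimeUnit;
import org.junit.Rule;
import org.junit.rules.Timeout;

public class ExampleContainerTests {
    // Per-operation wait used inside the tests (the name mirrors the snippets; the value is an assumption).
    protected static final Duration TIMEOUT = Duration.ofSeconds(30);

    // Class-level guard from org.junit.rules.Timeout: fails any @Test method that runs longer than the limit.
    @Rule
    public Timeout globalTimeout = new Timeout(TIMEOUT.toMillis() * 2, TimeUnit.MILLISECONDS);
}

The rule applies to every @Test in the class, so individual operations still wait at most TIMEOUT while the rule catches tests that hang entirely.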
Use of org.junit.rules.Timeout in project pravega by pravega.
The class DurableLogTests, method testRecoveryWithIncrementalCheckpoints.
/**
* Tests the DurableLog recovery process when there are multiple {@link MetadataCheckpointOperation}s added, with each
* such checkpoint including information about evicted segments or segments which had their storage state modified.
*/
@Test
public void testRecoveryWithIncrementalCheckpoints() throws Exception {
final int streamSegmentCount = 50;
// Setup a DurableLog and start it.
@Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()));
@Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
storage.initialize(1);
// First DurableLog. We use this for generating data.
val metadata1 = new MetadataBuilder(CONTAINER_ID).build();
@Cleanup CacheStorage cacheStorage = new DirectMemoryCache(Integer.MAX_VALUE);
@Cleanup CacheManager cacheManager = new CacheManager(CachePolicy.INFINITE, cacheStorage, executorService());
List<Long> deletedIds;
Set<Long> evictIds;
try (ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata1, storage, cacheManager, executorService());
DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata1, dataLogFactory, readIndex, executorService())) {
durableLog.startAsync().awaitRunning();
// Create some segments.
val segmentIds = new ArrayList<>(createStreamSegmentsWithOperations(streamSegmentCount, durableLog));
deletedIds = segmentIds.subList(0, 5);
val mergedFromIds = segmentIds.subList(5, 10);
// Must be the same length as mergedFromIds.
val mergedToIds = segmentIds.subList(10, 15);
evictIds = new HashSet<>(segmentIds.subList(15, 20));
val changeStorageStateIds = segmentIds.subList(20, segmentIds.size() - 5);
// Append something to each segment.
for (val segmentId : segmentIds) {
if (!evictIds.contains(segmentId)) {
durableLog.add(new StreamSegmentAppendOperation(segmentId, generateAppendData((int) (long) segmentId), null), OperationPriority.Normal, TIMEOUT).join();
}
}
// Checkpoint 1.
durableLog.checkpoint(TIMEOUT).join();
// Delete some segments.
for (val segmentId : deletedIds) {
durableLog.add(new DeleteSegmentOperation(segmentId), OperationPriority.Normal, TIMEOUT).join();
}
// Checkpoint 2.
durableLog.checkpoint(TIMEOUT).join();
// Merge some segments.
for (int i = 0; i < mergedFromIds.size(); i++) {
durableLog.add(new StreamSegmentSealOperation(mergedFromIds.get(i)), OperationPriority.Normal, TIMEOUT).join();
durableLog.add(new MergeSegmentOperation(mergedToIds.get(i), mergedFromIds.get(i)), OperationPriority.Normal, TIMEOUT).join();
}
// Checkpoint 3.
durableLog.checkpoint(TIMEOUT).join();
// Evict some segments.
val evictableContainerMetadata = (EvictableMetadata) metadata1;
metadata1.removeTruncationMarkers(metadata1.getOperationSequenceNumber());
val toEvict = evictableContainerMetadata.getEvictionCandidates(Integer.MAX_VALUE, segmentIds.size()).stream().filter(m -> evictIds.contains(m.getId())).collect(Collectors.toList());
val evicted = evictableContainerMetadata.cleanup(toEvict, Integer.MAX_VALUE);
AssertExtensions.assertContainsSameElements("", evictIds, evicted.stream().map(SegmentMetadata::getId).collect(Collectors.toList()));
// Checkpoint 4.
durableLog.checkpoint(TIMEOUT).join();
// Update storage state for some segments.
for (val segmentId : changeStorageStateIds) {
val sm = metadata1.getStreamSegmentMetadata(segmentId);
if (segmentId % 3 == 0) {
sm.setStorageLength(sm.getLength());
}
if (segmentId % 4 == 0) {
sm.markSealed();
sm.markSealedInStorage();
}
if (segmentId % 5 == 0) {
sm.markDeleted();
sm.markDeletedInStorage();
}
}
// Checkpoint 5.
durableLog.checkpoint(TIMEOUT).join();
// Stop the processor.
durableLog.stopAsync().awaitTerminated();
}
// Second DurableLog. We use this for recovery.
val metadata2 = new MetadataBuilder(CONTAINER_ID).build();
try (ContainerReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata2, storage, cacheManager, executorService());
DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata2, dataLogFactory, readIndex, executorService())) {
durableLog.startAsync().awaitRunning();
// Validate metadata matches.
val expectedSegmentIds = metadata1.getAllStreamSegmentIds();
val actualSegmentIds = metadata2.getAllStreamSegmentIds();
AssertExtensions.assertContainsSameElements("Unexpected set of recovered segments. Only Active segments expected to have been recovered.", expectedSegmentIds, actualSegmentIds);
val expectedSegments = expectedSegmentIds.stream().sorted().map(metadata1::getStreamSegmentMetadata).collect(Collectors.toList());
val actualSegments = actualSegmentIds.stream().sorted().map(metadata2::getStreamSegmentMetadata).collect(Collectors.toList());
for (int i = 0; i < expectedSegments.size(); i++) {
val e = expectedSegments.get(i);
val a = actualSegments.get(i);
SegmentMetadataComparer.assertEquals("Recovered segment metadata mismatch", e, a);
}
// Validate that the read index is as it should be. Here we can only check that the read indices for evicted segments are
// no longer loaded; more thorough checks are done in the ContainerReadIndexTests suite.
Streams.concat(evictIds.stream(), deletedIds.stream()).forEach(segmentId -> Assert.assertNull("Not expecting a read index for an evicted or deleted segment.", readIndex.getIndex(segmentId)));
// Stop the processor.
durableLog.stopAsync().awaitTerminated();
}
}
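The test above checkpoints after each batch of changes (appends, deletes, merges, evictions, storage-state updates) and then proves that a second DurableLog instance rebuilds the same metadata from those checkpoints plus the log tail. A deliberately simplified, self-contained model of that snapshot-plus-replay idea is sketched below; this is not Pravega's implementation, and all names in it are hypothetical.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;

public final class CheckpointRecoverySketch {
    // The shared "durable data log": a list of (segmentId, appendLength) operations.
    private static final List<long[]> JOURNAL = new ArrayList<>();
    private static Map<Long, Long> checkpointSnapshot = new HashMap<>(); // state captured at checkpoint time
    private static int checkpointPosition = 0;                           // journal index covered by the snapshot

    static void append(long segmentId, long length) {
        JOURNAL.add(new long[] { segmentId, length });
    }

    static void checkpoint(Map<Long, Long> liveState) {
        checkpointSnapshot = new HashMap<>(liveState);
        checkpointPosition = JOURNAL.size();
    }

    // "Recovery": start from the most recent snapshot, then replay only the tail of the journal.
    static Map<Long, Long> recover() {
        Map<Long, Long> state = new HashMap<>(checkpointSnapshot);
        for (long[] op : JOURNAL.subList(checkpointPosition, JOURNAL.size())) {
            state.merge(op[0], op[1], Long::sum);
        }
        return state;
    }

    public static void main(String[] args) {
        Map<Long, Long> live = new HashMap<>();
        append(1, 10);
        live.merge(1L, 10L, Long::sum);
        checkpoint(live);          // everything so far is covered by the snapshot
        append(1, 5);              // this lands after the checkpoint and must be replayed on recovery
        live.merge(1L, 5L, Long::sum);
        System.out.println(recover().equals(live));   // prints "true": snapshot + tail replay == live state
    }
}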
Use of org.junit.rules.Timeout in project pravega by pravega.
The class OperationProcessorTests, method testWithInvalidOperations.
/**
* Tests the ability of the OperationProcessor to process Operations when encountering invalid operations (such as
* appends to StreamSegments that do not exist or to those that are sealed). This covers the following exceptions:
* * StreamSegmentNotExistsException
* * StreamSegmentSealedException
* * General MetadataUpdateException.
*/
@Test
public void testWithInvalidOperations() throws Exception {
int streamSegmentCount = 10;
int appendsPerStreamSegment = 40;
// We are going to prematurely seal this StreamSegment.
long sealedStreamSegmentId = 6;
// We are going to prematurely mark this StreamSegment as deleted.
long deletedStreamSegmentId = 8;
// This is a bogus StreamSegment, that does not exist.
long nonExistentStreamSegmentId;
@Cleanup TestContext context = new TestContext();
// Generate some test data (no need to complicate ourselves with Transactions here; that is tested in the no-failure test).
HashSet<Long> streamSegmentIds = createStreamSegmentsInMetadata(streamSegmentCount, context.metadata);
nonExistentStreamSegmentId = streamSegmentIds.size();
streamSegmentIds.add(nonExistentStreamSegmentId);
context.metadata.getStreamSegmentMetadata(sealedStreamSegmentId).markSealed();
context.metadata.getStreamSegmentMetadata(deletedStreamSegmentId).markDeleted();
List<Operation> operations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
// Setup an OperationProcessor and start it.
@Cleanup TestDurableDataLog dataLog = TestDurableDataLog.create(CONTAINER_ID, MAX_DATA_LOG_APPEND_SIZE, executorService());
dataLog.initialize(TIMEOUT);
@Cleanup OperationProcessor operationProcessor = new OperationProcessor(context.metadata, context.stateUpdater, dataLog, getNoOpCheckpointPolicy(), getDefaultThrottlerSettings(), executorService());
operationProcessor.startAsync().awaitRunning();
// Process all generated operations.
List<OperationWithCompletion> completionFutures = processOperations(operations, operationProcessor);
// Wait for all such operations to complete. We are expecting exceptions, so verify that we actually get them.
AssertExtensions.assertThrows("No operations failed.", OperationWithCompletion.allOf(completionFutures)::join, ex -> ex instanceof MetadataUpdateException || ex instanceof StreamSegmentException);
HashSet<Long> streamSegmentsWithNoContents = new HashSet<>();
streamSegmentsWithNoContents.add(sealedStreamSegmentId);
streamSegmentsWithNoContents.add(deletedStreamSegmentId);
streamSegmentsWithNoContents.add(nonExistentStreamSegmentId);
// Verify that the "right" operations failed, while the others succeeded.
for (OperationWithCompletion oc : completionFutures) {
if (oc.operation instanceof StorageOperation) {
long streamSegmentId = ((StorageOperation) oc.operation).getStreamSegmentId();
if (streamSegmentsWithNoContents.contains(streamSegmentId)) {
Assert.assertTrue("Completion future for invalid StreamSegment " + streamSegmentId + " did not complete exceptionally.", oc.completion.isCompletedExceptionally());
Predicate<Throwable> errorValidator;
if (streamSegmentId == sealedStreamSegmentId) {
errorValidator = ex -> ex instanceof StreamSegmentSealedException;
} else if (streamSegmentId == deletedStreamSegmentId) {
errorValidator = ex -> ex instanceof StreamSegmentNotExistsException;
} else {
errorValidator = ex -> ex instanceof MetadataUpdateException;
}
AssertExtensions.assertThrows("Unexpected exception for failed Operation.", oc.completion::join, errorValidator);
continue;
}
}
// If we get here, we must verify no exception was thrown.
oc.completion.join();
}
performLogOperationChecks(completionFutures, context.memoryLog, dataLog, context.metadata);
performMetadataChecks(streamSegmentIds, streamSegmentsWithNoContents, new HashMap<>(), completionFutures, context.metadata, false, false);
performReadIndexChecks(completionFutures, context.readIndex);
operationProcessor.stopAsync().awaitTerminated();
}
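The errorValidator predicates above map each invalid segment to the exception it is expected to fail with. The same idea in isolation, with a hypothetical assertThrowsMatching helper standing in for Pravega's AssertExtensions, looks like this:

import java.util.function.Predicate;
import org.junit.Assert;

public final class ExceptionValidationSketch {
    // Hypothetical helper: run the action, catch whatever it throws, and check the
    // throwable against a caller-supplied predicate; fail if nothing is thrown.
    static void assertThrowsMatching(Runnable action, Predicate<Throwable> validator) {
        try {
            action.run();
        } catch (Throwable t) {
            Assert.assertTrue("Unexpected exception type: " + t, validator.test(t));
            return;
        }
        Assert.fail("Expected an exception, but none was thrown.");
    }

    public static void main(String[] args) {
        // A sealed segment is expected to surface an IllegalStateException in this toy example.
        assertThrowsMatching(() -> {
            throw new IllegalStateException("segment is sealed");
        }, ex -> ex instanceof IllegalStateException);
    }
}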
Use of org.junit.rules.Timeout in project pravega by pravega.
The class DataFrameReaderTests, method testReadsWithPartialEntries.
/**
* Tests the case where we begin reading from a DataFrame that starts with a partial record. That record needs to
* be dropped (not returned): DataFrameReader should always return full records.
*/
@Test
public void testReadsWithPartialEntries() throws Exception {
// This test will only work if LARGE_RECORD_MIN_SIZE > FRAME_SIZE.
ArrayList<TestLogItem> records = DataFrameTestHelpers.generateLogItems(3, LARGE_RECORD_MIN_SIZE, LARGE_RECORD_MIN_SIZE, 0);
try (TestDurableDataLog dataLog = TestDurableDataLog.create(CONTAINER_ID, FRAME_SIZE, executorService())) {
dataLog.initialize(TIMEOUT);
ArrayList<DataFrameBuilder.CommitArgs> commitFrames = new ArrayList<>();
BiConsumer<Throwable, DataFrameBuilder.CommitArgs> errorCallback = (ex, a) -> Assert.fail(String.format("Unexpected error occurred upon commit. %s", ex));
val args = new DataFrameBuilder.Args(Callbacks::doNothing, commitFrames::add, errorCallback, executorService());
try (DataFrameBuilder<TestLogItem> b = new DataFrameBuilder<>(dataLog, SERIALIZER, args)) {
for (TestLogItem r : records) {
b.append(r);
}
b.flush();
}
// Delete the first entry in the DataLog.
ArrayList<Integer> failedIndices = new ArrayList<>();
dataLog.truncate(commitFrames.get(0).getLogAddress(), TIMEOUT).join();
// Given that each TestLogItem's length is larger than a data frame, truncating the first DataFrame will
// invalidate the first record.
failedIndices.add(0);
TestSerializer logItemFactory = new TestSerializer();
DataFrameReader<TestLogItem> reader = new DataFrameReader<>(dataLog, logItemFactory, CONTAINER_ID);
List<TestLogItem> readItems = readAll(reader);
checkReadResult(records, failedIndices, readItems);
}
}
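checkReadResult is a helper defined elsewhere in DataFrameReaderTests; the essential check it performs is that the items read back equal the written records minus the indices expected to fail. A hypothetical reconstruction of that comparison, using plain Java collections and JUnit assertions, could look like:

import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;
import org.junit.Assert;

public final class ReadResultCheckSketch {
    // Keep only the records whose indices are NOT expected to fail, preserving order,
    // then verify the reader returned exactly that sequence.
    static <T> void checkReadResult(List<T> written, Collection<Integer> failedIndices, List<T> read) {
        List<T> expected = IntStream.range(0, written.size())
                .filter(i -> !failedIndices.contains(i))
                .mapToObj(written::get)
                .collect(Collectors.toList());
        Assert.assertEquals("Read items do not match the surviving records.", expected, read);
    }

    public static void main(String[] args) {
        // Record at index 0 was invalidated by the truncation, so it must not be returned.
        checkReadResult(Arrays.asList("r0", "r1", "r2"), Arrays.asList(0), Arrays.asList("r1", "r2"));
    }
}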
Use of org.junit.rules.Timeout in project pravega by pravega.
The class DataFrameReaderTests, method testReadsNoFailure.
/**
* Tests the happy case: DataFrameReader can read from a DataLog when there are no exceptions.
*/
@Test
public void testReadsNoFailure() throws Exception {
// Fail every X records (write-wise).
int failEvery = 7;
ArrayList<TestLogItem> records = DataFrameTestHelpers.generateLogItems(100, SMALL_RECORD_MIN_SIZE, SMALL_RECORD_MAX_SIZE, 0);
records.addAll(DataFrameTestHelpers.generateLogItems(100, LARGE_RECORD_MIN_SIZE, LARGE_RECORD_MAX_SIZE, records.size()));
// Have every 'failEvery'-th record fail after writing 90% of itself.
for (int i = 0; i < records.size(); i += failEvery) {
records.get(i).failSerializationAfterComplete(0.9, new IOException("intentional " + i));
}
HashSet<Integer> failedIndices = new HashSet<>();
try (TestDurableDataLog dataLog = TestDurableDataLog.create(CONTAINER_ID, FRAME_SIZE, executorService())) {
dataLog.initialize(TIMEOUT);
BiConsumer<Throwable, DataFrameBuilder.CommitArgs> errorCallback = (ex, a) -> Assert.fail(String.format("Unexpected error occurred upon commit. %s", ex));
val args = new DataFrameBuilder.Args(Callbacks::doNothing, Callbacks::doNothing, errorCallback, executorService());
try (DataFrameBuilder<TestLogItem> b = new DataFrameBuilder<>(dataLog, SERIALIZER, args)) {
for (int i = 0; i < records.size(); i++) {
try {
b.append(records.get(i));
} catch (IOException ex) {
failedIndices.add(i);
}
}
b.flush();
}
TestSerializer logItemFactory = new TestSerializer();
DataFrameReader<TestLogItem> reader = new DataFrameReader<>(dataLog, logItemFactory, CONTAINER_ID);
List<TestLogItem> readItems = readAll(reader);
checkReadResult(records, failedIndices, readItems);
}
}
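failSerializationAfterComplete is a hook on TestLogItem that makes serialization throw once a given fraction of the payload has been written. A minimal, self-contained illustration of that style of fault injection is sketched below; the class is hypothetical and is not the Pravega test helper.

import java.io.ByteArrayOutputStream;
import java.io.IOException;
import java.io.OutputStream;

// Hypothetical fault-injecting stream: forwards writes until a byte threshold is crossed,
// then throws, mimicking a record that fails after ~90% of it has been serialized.
public final class FailingOutputStream extends OutputStream {
    private final OutputStream inner;
    private final long failAfterBytes;
    private long written;

    public FailingOutputStream(OutputStream inner, long failAfterBytes) {
        this.inner = inner;
        this.failAfterBytes = failAfterBytes;
    }

    @Override
    public void write(int b) throws IOException {
        if (written >= failAfterBytes) {
            throw new IOException("intentional failure after " + written + " bytes");
        }
        inner.write(b);
        written++;
    }

    public static void main(String[] args) throws IOException {
        byte[] payload = new byte[100];
        // Fail after 90% of the payload, like failSerializationAfterComplete(0.9, ...).
        OutputStream out = new FailingOutputStream(new ByteArrayOutputStream(), (long) (payload.length * 0.9));
        try {
            out.write(payload);                 // OutputStream.write(byte[]) delegates to write(int) per byte
            throw new AssertionError("expected an IOException");
        } catch (IOException expected) {
            System.out.println("failed as intended: " + expected.getMessage());
        }
    }
}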