Usage example of org.junit.rules.Timeout in project pravega, from class SegmentAggregatorTests, method testFlushEmptyAppend.
/**
 * Verifies that flush() correctly handles an empty (zero-length) append sitting between
 * a buffer-filling append and a merge operation: the empty append must contribute no
 * flushed bytes and must not interfere with the subsequent merge.
 */
@Test
public void testFlushEmptyAppend() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    val random = new Random(0);
    final byte[] fullAppendData = new byte[config.getMaxFlushSizeBytes()];
    final byte[] sourceSegmentData = new byte[100];
    final int segmentLength = fullAppendData.length + sourceSegmentData.length;
    random.nextBytes(fullAppendData);
    random.nextBytes(sourceSegmentData);
    @Cleanup
    TestContext context = new TestContext(config);

    // Set up the target segment in Storage and initialize the aggregator.
    context.storage.create(SEGMENT_NAME, TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    val metadata = (UpdateableSegmentMetadata) context.segmentAggregator.getMetadata();
    metadata.setLength(segmentLength);

    // Append #1: exactly fills the AggregatedAppend buffer's maximum capacity.
    val fullAppend = new StreamSegmentAppendOperation(SEGMENT_ID, new ByteArraySegment(fullAppendData), null);
    fullAppend.setStreamSegmentOffset(0);
    fullAppend.setSequenceNumber(context.containerMetadata.nextOperationSequenceNumber());
    context.dataSource.recordAppend(fullAppend);
    context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(fullAppend));

    // Append #2: zero-length payload at the current end of the segment.
    val emptyAppend = new StreamSegmentAppendOperation(SEGMENT_ID, BufferView.empty(), null);
    emptyAppend.setStreamSegmentOffset(fullAppendData.length);
    emptyAppend.setSequenceNumber(context.containerMetadata.nextOperationSequenceNumber());
    context.dataSource.recordAppend(emptyAppend);
    context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(emptyAppend));

    // Prepare a fully-written, sealed source segment in Storage to serve as the merge source.
    val sourceAggregator = context.transactionAggregators[0];
    val sourceMetadata = (UpdateableSegmentMetadata) sourceAggregator.getMetadata();
    sourceMetadata.setLength(sourceSegmentData.length);
    sourceMetadata.setStorageLength(sourceSegmentData.length);
    context.storage.create(sourceMetadata.getName(), TIMEOUT).join();
    context.storage.openWrite(sourceMetadata.getName())
            .thenCompose(handle -> context.storage
                    .write(handle, 0, new ByteArrayInputStream(sourceSegmentData), sourceSegmentData.length, TIMEOUT)
                    .thenCompose(v -> context.storage.seal(handle, TIMEOUT)))
            .join();

    // Queue up a merge of the source segment immediately after the empty append.
    sourceMetadata.markSealed();
    sourceMetadata.markSealedInStorage();
    sourceMetadata.markMerged();
    val mergeOp = new MergeSegmentOperation(SEGMENT_ID, sourceMetadata.getId());
    mergeOp.setStreamSegmentOffset(fullAppendData.length);
    mergeOp.setLength(sourceMetadata.getLength());
    mergeOp.setSequenceNumber(context.containerMetadata.nextOperationSequenceNumber());
    context.segmentAggregator.add(mergeOp);

    // Flush everything and verify the flushed vs. merged byte counts separately.
    val flushResult = context.segmentAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertEquals("Unexpected number of bytes flushed", fullAppendData.length, flushResult.getFlushedBytes());
    Assert.assertEquals("Unexpected number of bytes merged", sourceSegmentData.length, flushResult.getMergedBytes());

    // The final segment contents must be the full append followed by the merged source data.
    byte[] expectedData = new byte[fullAppendData.length + sourceSegmentData.length];
    System.arraycopy(fullAppendData, 0, expectedData, 0, fullAppendData.length);
    System.arraycopy(sourceSegmentData, 0, expectedData, fullAppendData.length, sourceSegmentData.length);
    verifySegmentData(expectedData, context);
}
Usage example of org.junit.rules.Timeout in project pravega, from class SegmentAggregatorTests, method testFlushAppendWithStorageErrors.
/**
 * Tests the behavior of flush() with appends and storage errors (on the write() method).
 * Injects both synchronous and asynchronous errors into Storage writes, verifies that flush()
 * surfaces exactly the injected exception when one fires, and finally confirms that, once the
 * injectors are removed, all accumulated data ends up in Storage byte-for-byte.
 */
@Test
public void testFlushAppendWithStorageErrors() throws Exception {
final WriterConfig config = DEFAULT_CONFIG;
final int appendCount = config.getFlushThresholdBytes() * 10;
// Fail every 2nd write attempt synchronously and every 3rd asynchronously.
final int failSyncEvery = 2;
final int failAsyncEvery = 3;
@Cleanup TestContext context = new TestContext(config);
context.segmentAggregator.initialize(TIMEOUT).join();
// Have the writes fail every few attempts with a well known exception.
// setException records the most recently injected exception so each iteration can tell
// whether a flush failure was expected (injected) or a genuine test failure.
AtomicReference<IntentionalException> setException = new AtomicReference<>();
Supplier<Exception> exceptionSupplier = () -> {
IntentionalException ex = new IntentionalException(Long.toString(context.timer.getElapsedMillis()));
setException.set(ex);
return ex;
};
context.storage.setWriteSyncErrorInjector(new ErrorInjector<>(count -> count % failSyncEvery == 0, exceptionSupplier));
context.storage.setWriteAsyncErrorInjector(new ErrorInjector<>(count -> count % failAsyncEvery == 0, exceptionSupplier));
@Cleanup ByteArrayOutputStream writtenData = new ByteArrayOutputStream();
// Part 1: flush triggered by accumulated size.
int exceptionCount = 0;
for (int i = 0; i < appendCount; i++) {
// Add another operation and record its length.
StorageOperation appendOp = generateAppendAndUpdateMetadata(i, SEGMENT_ID, context);
context.segmentAggregator.add(appendOp);
getAppendData(appendOp, writtenData, context);
// Call flush() and inspect the result.
// Clear the marker first so we only attribute exceptions injected by THIS iteration's flush.
setException.set(null);
// Force a flush by incrementing the time by a lot.
context.increaseTime(config.getFlushThresholdTime().toMillis() + 1);
WriterFlushResult flushResult = null;
try {
flushResult = context.segmentAggregator.flush(TIMEOUT).join();
// If an exception was injected during this flush, join() should have thrown it.
Assert.assertNull("An exception was expected, but none was thrown.", setException.get());
Assert.assertNotNull("No FlushResult provided.", flushResult);
} catch (Exception ex) {
if (setException.get() != null) {
// Expected failure: after unwrapping async/completion wrappers, it must be exactly the injected exception.
Assert.assertEquals("Unexpected exception thrown.", setException.get(), Exceptions.unwrap(ex));
exceptionCount++;
} else {
// Not expecting any exception this time.
throw ex;
}
}
// Check flush result (only available when this iteration's flush succeeded).
if (flushResult != null) {
AssertExtensions.assertGreaterThan("Not enough bytes were flushed (time threshold).", 0, flushResult.getFlushedBytes());
Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
}
}
// Do one last flush at the end to make sure we clear out all the buffers, if there's anything else left.
// Force a flush by incrementing the time by a lot.
context.increaseTime(config.getFlushThresholdTime().toMillis() + 1);
// Remove the error injectors so this final flush can complete successfully.
context.storage.setWriteSyncErrorInjector(null);
context.storage.setWriteAsyncErrorInjector(null);
context.segmentAggregator.flush(TIMEOUT).join();
// Verify data: everything appended throughout the test must now be durably stored, intact and in order.
byte[] expectedData = writtenData.toByteArray();
byte[] actualData = new byte[expectedData.length];
long storageLength = context.storage.getStreamSegmentInfo(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join().getLength();
Assert.assertEquals("Unexpected number of bytes flushed to Storage.", expectedData.length, storageLength);
context.storage.read(readHandle(context.segmentAggregator.getMetadata().getName()), 0, actualData, 0, actualData.length, TIMEOUT).join();
Assert.assertArrayEquals("Unexpected data written to storage.", expectedData, actualData);
// Sanity check: the injectors must have actually fired at least once, or this test proved nothing.
AssertExtensions.assertGreaterThan("Not enough errors injected.", 0, exceptionCount);
}
Usage example of org.junit.rules.Timeout in project pravega, from class TableServiceTests, method testEndToEnd.
// endregion
/**
 * Tests an End-to-End scenario for a {@link TableStore} implementation using a real implementation of {@link StreamSegmentStore}
 * (without any mocks or manual event triggering or other test aids). Features tested:
 * - Table Segment creation and deletion.
 * - Conditional and unconditional updates.
 * - Conditional and unconditional removals.
 * - Recovering of Table Segments after failover.
 *
 * This tests both Hash Table Segments and Fixed-Key-Length Table Segments.
 *
 * Each try-with-resources builder block below represents one "process lifetime": closing the
 * builder and creating a new one forces a recovery, which is re-validated at the start of each
 * subsequent phase via check().
 */
@Test
public void testEndToEnd() throws Exception {
val rnd = new Random(0);
// One segment type per flavor under test: Hash Table and Fixed-Key-Length Table Segments.
val segmentTypes = new SegmentType[] { SegmentType.builder().tableSegment().build(), SegmentType.builder().fixedKeyLengthTableSegment().build() };
// segmentNames/keyInfo survive across phases: they are the in-memory "expected state" that
// each post-recovery check() compares against the actual TableStore contents.
ArrayList<String> segmentNames;
HashMap<BufferView, EntryData> keyInfo;
// Phase 1: Create some segments and update some data (unconditionally).
log.info("Starting Phase 1");
try (val builder = createBuilder()) {
val tableStore = builder.createTableStoreService();
// Create the Table Segments.
segmentNames = createSegments(tableStore, segmentTypes);
log.info("Created Segments: {}.", String.join(", ", segmentNames));
// Generate the keys and map them to segments.
keyInfo = generateKeysForSegments(segmentNames, rnd);
// Unconditional updates.
val updates = generateUpdates(keyInfo, false, rnd);
val updateVersions = executeUpdates(updates, tableStore);
acceptUpdates(updates, updateVersions, keyInfo);
log.info("Finished unconditional updates.");
// Check.
check(keyInfo, tableStore);
log.info("Finished Phase 1");
}
// Phase 2: Force a recovery and remove all data (unconditionally)
log.info("Starting Phase 2");
try (val builder = createBuilder()) {
val tableStore = builder.createTableStoreService();
// Check (after recovery)
check(keyInfo, tableStore);
// Unconditional removals.
val removals = generateRemovals(keyInfo, false);
executeRemovals(removals, tableStore);
acceptRemovals(removals, keyInfo);
// Check.
check(keyInfo, tableStore);
log.info("Finished Phase 2");
}
// Phase 3: Force a recovery and conditionally update and remove data
log.info("Starting Phase 3");
try (val builder = createBuilder()) {
val tableStore = builder.createTableStoreService();
// Check (after recovery).
check(keyInfo, tableStore);
// Conditional update (version-conditioned: the 'true' flag requests conditional operations).
val updates = generateUpdates(keyInfo, true, rnd);
val updateVersions = executeUpdates(updates, tableStore);
acceptUpdates(updates, updateVersions, keyInfo);
// Offset-conditioned updates; -1L is the offset condition used throughout this test.
// NOTE(review): presumably -1 disables the offset check — confirm against the TableStore.put contract.
val offsetConditionedUpdates = generateUpdates(keyInfo, true, rnd);
val offsetUpdateVersions = executeOffsetConditionalUpdates(offsetConditionedUpdates, -1L, tableStore);
acceptUpdates(offsetConditionedUpdates, offsetUpdateVersions, keyInfo);
log.info("Finished conditional updates.");
// Check.
check(keyInfo, tableStore);
// Conditional remove (version-conditioned).
val removals = generateRemovals(keyInfo, true);
executeRemovals(removals, tableStore);
acceptRemovals(removals, keyInfo);
// Offset-conditioned removals, mirroring the offset-conditioned updates above.
val offsetConditionedRemovals = generateRemovals(keyInfo, true);
executeOffsetConditonalRemovals(offsetConditionedRemovals, -1L, tableStore);
acceptRemovals(offsetConditionedRemovals, keyInfo);
log.info("Finished conditional removes.");
// Check.
check(keyInfo, tableStore);
log.info("Finished Phase 3");
}
// Phase 4: Force a recovery and conditionally remove all data
log.info("Starting Phase 4");
try (val builder = createBuilder()) {
val tableStore = builder.createTableStoreService();
// Check (after recovery)
check(keyInfo, tableStore);
// Conditional update again.
val updates = generateUpdates(keyInfo, true, rnd);
val updateVersions = executeUpdates(updates, tableStore);
acceptUpdates(updates, updateVersions, keyInfo);
log.info("Finished conditional updates.");
// Check.
check(keyInfo, tableStore);
// Delete all segments in parallel and wait for every deletion to complete.
val deletions = segmentNames.stream().map(s -> tableStore.deleteSegment(s, false, TIMEOUT)).collect(Collectors.toList());
Futures.allOf(deletions).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
log.info("Finished Phase 4");
}
}
Usage example of org.junit.rules.Timeout in project pravega, from class TableServiceTests, method executeOffsetConditonalRemovals.
/**
 * Removes the given Table Keys from their respective Table Segments, conditioning each removal
 * on the supplied segment offset, and blocks until all removals complete (or TIMEOUT elapses).
 * NOTE(review): the method name contains a typo ("Conditonal"); it is kept as-is so existing
 * callers keep compiling.
 *
 * @param removals           The keys to remove, grouped by Table Segment name.
 * @param tableSegmentOffset The offset passed to each {@code remove} call (callers in this test
 *                           pass -1; presumably that disables the offset check — confirm against
 *                           the TableStore.remove contract).
 * @param tableStore         The {@link TableStore} to remove from.
 * @throws Exception If any removal fails or does not complete within TIMEOUT.
 */
private void executeOffsetConditonalRemovals(Map<String, ArrayList<TableKey>> removals, long tableSegmentOffset, TableStore tableStore) throws Exception {
    // Issue one remove() per segment in parallel, then wait for all of them.
    val removalFutures = removals.entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey, e -> tableStore.remove(e.getKey(), e.getValue(), tableSegmentOffset, TIMEOUT)));
    Futures.allOf(removalFutures.values()).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
}
Usage example of org.junit.rules.Timeout in project pravega, from class TableServiceTests, method executeRemovals.
/**
 * Unconditionally removes the given Table Keys from their respective Table Segments and blocks
 * until all removals complete (or TIMEOUT elapses).
 *
 * @param removals   The keys to remove, grouped by Table Segment name.
 * @param tableStore The {@link TableStore} to remove from.
 * @throws Exception If any removal fails or does not complete within TIMEOUT.
 */
private void executeRemovals(Map<String, ArrayList<TableKey>> removals, TableStore tableStore) throws Exception {
    // Issue one remove() per segment in parallel, then wait for all of them.
    val removalFutures = removals.entrySet().stream()
            .collect(Collectors.toMap(Map.Entry::getKey, e -> tableStore.remove(e.getKey(), e.getValue(), TIMEOUT)));
    Futures.allOf(removalFutures.values()).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
}
Aggregations