Use of io.pravega.segmentstore.storage.LogAddress in project pravega by pravega.
From the class OperationProcessorTests, method performLogOperationChecks.
private void performLogOperationChecks(Collection<OperationWithCompletion> operations, SequencedItemList<Operation> memoryLog,
                                       DurableDataLog dataLog, TruncationMarkerRepository truncationMarkers, int maxCount) throws Exception {
    // Log Operation based checks.
    val successfulOps = operations.stream()
                                  .filter(oc -> !oc.completion.isCompletedExceptionally())
                                  .map(oc -> oc.operation)
                                  .filter(Operation::canSerialize)
                                  .limit(maxCount)
                                  .collect(Collectors.toList());
    @Cleanup
    DataFrameReader<Operation> dataFrameReader = new DataFrameReader<>(dataLog, new OperationSerializer(), CONTAINER_ID);
    long lastSeqNo = -1;
    if (successfulOps.size() > 0) {
        // Writing to the memory log is asynchronous and there is no callback to signal when a write has landed,
        // so poll periodically until the last item has been written.
        await(() -> memoryLog.read(successfulOps.get(successfulOps.size() - 1).getSequenceNumber() - 1, 1).hasNext(), 10);
    }
    Iterator<Operation> memoryLogIterator = memoryLog.read(-1, operations.size() + 1);
    OperationComparer memoryLogComparer = new OperationComparer(true);
    for (Operation expectedOp : successfulOps) {
        // Verify that the operations have been completed and assigned sequential Sequence Numbers.
        AssertExtensions.assertGreaterThan("Operations were not assigned sequential Sequence Numbers.", lastSeqNo, expectedOp.getSequenceNumber());
        lastSeqNo = expectedOp.getSequenceNumber();

        // MemoryLog: verify that the operations match those in the expected list.
        Assert.assertTrue("No more items left to read from MemoryLog. Expected: " + expectedOp, memoryLogIterator.hasNext());
        // Use memoryLogComparer: we are actually expecting the same object here.
        Operation actual = memoryLogIterator.next();
        memoryLogComparer.assertEquals("Unexpected Operation in MemoryLog.", expectedOp, actual);

        // DataLog: read back using DataFrameReader and verify the operations match those in the expected list.
        DataFrameRecord<Operation> dataFrameRecord = dataFrameReader.getNext();
        Assert.assertNotNull("No more items left to read from DataLog. Expected: " + expectedOp, dataFrameRecord);
        // We are reading the raw operation from the DataFrame, so expect different objects (but same contents).
        OperationComparer.DEFAULT.assertEquals(expectedOp, dataFrameRecord.getItem());

        // Check the truncation marker recorded for this Operation.
        LogAddress dataFrameAddress = truncationMarkers.getClosestTruncationMarker(expectedOp.getSequenceNumber());
        if (dataFrameRecord.getLastFullDataFrameAddress() != null
                && dataFrameRecord.getLastFullDataFrameAddress().getSequence() != dataFrameRecord.getLastUsedDataFrameAddress().getSequence()) {
            // This operation spans multiple DataFrames. The TruncationMarker should be set on the last DataFrame
            // that ends with a part of it.
            Assert.assertEquals("Unexpected truncation marker for Operation SeqNo " + expectedOp.getSequenceNumber()
                    + " when it spans multiple DataFrames.", dataFrameRecord.getLastFullDataFrameAddress(), dataFrameAddress);
        } else if (dataFrameRecord.isLastFrameEntry()) {
            // The operation was the last one in the frame. This is a Truncation Marker.
            Assert.assertEquals("Unexpected truncation marker for Operation SeqNo " + expectedOp.getSequenceNumber()
                    + " when it is the last entry in a DataFrame.", dataFrameRecord.getLastUsedDataFrameAddress(), dataFrameAddress);
        } else {
            // The operation is not the last in the frame, nor does it span multiple frames. There could be data
            // after it that is not safe to truncate, so the correct Truncation Marker is the same as the one for
            // the previous operation.
            LogAddress expectedTruncationMarker = truncationMarkers.getClosestTruncationMarker(expectedOp.getSequenceNumber() - 1);
            Assert.assertEquals("Unexpected truncation marker for Operation SeqNo " + expectedOp.getSequenceNumber()
                    + " when it is in the middle of a DataFrame.", expectedTruncationMarker, dataFrameAddress);
        }
    }
}
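The await(...) call near the top of this method is a polling helper supplied by the test class and not shown in the snippet. A minimal sketch of such a helper, with a hypothetical implementation (assuming TIMEOUT is the java.time.Duration used throughout these tests and that the second argument is the check interval in milliseconds):

// Hypothetical polling helper; the actual one in the Pravega test base may differ.
// Re-evaluates the condition every checkFrequencyMillis until it holds, failing
// with a TimeoutException once TIMEOUT has elapsed.
private void await(java.util.function.Supplier<Boolean> condition, int checkFrequencyMillis)
        throws java.util.concurrent.TimeoutException, InterruptedException {
    long remainingMillis = TIMEOUT.toMillis();
    while (!condition.get()) {
        if (remainingMillis <= 0) {
            throw new java.util.concurrent.TimeoutException("Condition did not become true within the allotted time.");
        }
        Thread.sleep(checkFrequencyMillis);
        remainingMillis -= checkFrequencyMillis;
    }
}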
Use of io.pravega.segmentstore.storage.LogAddress in project pravega by pravega.
From the class InMemoryDurableDataLogTests, method testConstructor.
/**
 * Tests the constructor of InMemoryDurableDataLog. The constructor takes in an EntryCollection; this test verifies
 * that information written by a previous instance of an InMemoryDurableDataLog is still accessible through it.
 */
@Test(timeout = 5000)
public void testConstructor() throws Exception {
    InMemoryDurableDataLog.EntryCollection entries = new InMemoryDurableDataLog.EntryCollection();
    TreeMap<LogAddress, byte[]> writeData;

    // Create first log and write some data to it.
    try (DurableDataLog log = new InMemoryDurableDataLog(entries, executorService())) {
        log.initialize(TIMEOUT);
        writeData = populate(log, WRITE_COUNT);
    }

    // Close the first log, and open a second one, with the same EntryCollection in the constructor.
    try (DurableDataLog log = new InMemoryDurableDataLog(entries, executorService())) {
        log.initialize(TIMEOUT);

        // Verify it contains the same entries.
        verifyReads(log, writeData);
    }
}
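The populate(...) and verifyReads(...) helpers belong to the test class and are not shown. A plausible sketch of populate, grounded in the append(...) usage visible in the BookKeeperLogTests snippet below (the Write_%d payload format is an illustrative assumption):

// Hypothetical sketch of populate(...): appends writeCount entries and records
// each payload under the LogAddress that the log assigned to it.
private TreeMap<LogAddress, byte[]> populate(DurableDataLog log, int writeCount) {
    TreeMap<LogAddress, byte[]> writtenData = new TreeMap<>(Comparator.comparingLong(LogAddress::getSequence));
    for (int i = 0; i < writeCount; i++) {
        byte[] data = String.format("Write_%d", i).getBytes(java.nio.charset.StandardCharsets.UTF_8);
        LogAddress address = log.append(new ByteArraySegment(data), TIMEOUT).join();
        writtenData.put(address, data);
    }
    return writtenData;
}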
Use of io.pravega.segmentstore.storage.LogAddress in project pravega by pravega.
From the class DataFrameTests, method testFrameSequence.
/**
* Test getFrameSequence() and getPreviousSequence().
*/
@Test
public void testFrameSequence() {
    long newSequence = 67890;
    int dataFrameSize = 1000;
    DataFrame df = DataFrame.ofSize(dataFrameSize);
    LogAddress a = new LogAddress(newSequence) {};
    df.setAddress(a);
    Assert.assertEquals("Unexpected value for getFrameSequence().", newSequence, df.getAddress().getSequence());
}
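LogAddress is abstract, which is why the test instantiates an empty anonymous subclass. When a reusable named address type is more convenient, a minimal concrete subclass works just as well; a plausible sketch of the TestLogAddress referenced in testConcurrentStopAndCommit below:

// Minimal concrete LogAddress for tests. The empty anonymous subclass above shows
// that LogAddress declares no abstract methods, so this is all that is needed.
private static class TestLogAddress extends LogAddress {
    TestLogAddress(long sequence) {
        super(sequence);
    }
}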
Use of io.pravega.segmentstore.storage.LogAddress in project pravega by pravega.
From the class OperationProcessorTests, method testConcurrentStopAndCommit.
/**
 * Tests a scenario where the OperationProcessor is shut down while a DataFrame is still being processed. The
 * DataFrame write eventually completes successfully, but the operation it carries should nonetheless be cancelled.
 */
@Test
public void testConcurrentStopAndCommit() throws Exception {
    @Cleanup
    TestContext context = new TestContext();

    // Generate some test data.
    val segmentId = createStreamSegmentsInMetadata(1, context.metadata).stream().findFirst().orElse(-1L);
    List<Operation> operations = Collections.singletonList(new StreamSegmentAppendOperation(segmentId, new byte[1], null));
    CompletableFuture<LogAddress> appendCallback = new CompletableFuture<>();

    // Set up an OperationProcessor with a custom DurableDataLog and start it.
    @Cleanup
    DurableDataLog dataLog = new ManualAppendOnlyDurableDataLog(() -> appendCallback);
    dataLog.initialize(TIMEOUT);
    @Cleanup
    OperationProcessor operationProcessor = new OperationProcessor(context.metadata, context.stateUpdater, dataLog, getNoOpCheckpointPolicy(), executorService());
    operationProcessor.startAsync().awaitRunning();

    // Process all generated operations.
    OperationWithCompletion completionFuture = processOperations(operations, operationProcessor).stream().findFirst().orElse(null);
    operationProcessor.stopAsync();
    appendCallback.complete(new TestLogAddress(1));

    // Stop the processor.
    operationProcessor.awaitTerminated();

    // Wait for the operation to complete. It should have been cancelled (due to the OperationProcessor shutting
    // down); no other exception (or successful completion) is accepted.
    AssertExtensions.assertThrows("Operation did not fail with the right exception.",
            () -> completionFuture.completion,
            ex -> ex instanceof CancellationException || ex instanceof ObjectClosedException);
}
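processOperations(...) is another helper of the test class whose body is not shown. A plausible sketch, assuming OperationProcessor exposes a process(Operation) method that returns the operation's completion future, and that OperationWithCompletion simply pairs an operation with that future (as its usage in performLogOperationChecks suggests); both signatures are assumptions:

// Hypothetical sketch of processOperations(...): submits every operation to the
// processor and pairs each one with the future that tracks its completion.
private List<OperationWithCompletion> processOperations(Collection<Operation> operations, OperationProcessor processor) {
    List<OperationWithCompletion> result = new ArrayList<>();
    operations.forEach(op -> result.add(new OperationWithCompletion(op, processor.process(op))));
    return result;
}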
Use of io.pravega.segmentstore.storage.LogAddress in project pravega by pravega.
From the class BookKeeperLogTests, method testAppendTransientBookieFailure.
/**
* Tests the ability to retry writes when Bookies fail.
*/
@Test
public void testAppendTransientBookieFailure() throws Exception {
    TreeMap<LogAddress, byte[]> writeData = new TreeMap<>(Comparator.comparingLong(LogAddress::getSequence));
    try (DurableDataLog log = createDurableDataLog()) {
        log.initialize(TIMEOUT);
        val dataList = new ArrayList<byte[]>();
        val futures = new ArrayList<CompletableFuture<LogAddress>>();
        try {
            // Suspend a bookie (this will trigger write errors).
            stopFirstBookie();

            // Issue appends in parallel, without waiting for them.
            int writeCount = getWriteCount();
            for (int i = 0; i < writeCount; i++) {
                byte[] data = getWriteData();
                futures.add(log.append(new ByteArraySegment(data), TIMEOUT));
                dataList.add(data);
            }
        } finally {
            // Resume the bookie with the appends still in flight.
            restartFirstBookie();
        }

        // Wait for all writes to complete, then reassemble the data in the order set by LogAddress.
        val addresses = Futures.allOfWithResults(futures).join();
        for (int i = 0; i < dataList.size(); i++) {
            writeData.put(addresses.get(i), dataList.get(i));
        }
    }

    // Verify data.
    try (DurableDataLog log = createDurableDataLog()) {
        log.initialize(TIMEOUT);
        verifyReads(log, writeData);
    }
}
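The verifyReads(...) helper used here (and in the InMemoryDurableDataLogTests snippet above) is shared test infrastructure. A plausible sketch, assuming DurableDataLog exposes a getReader() whose items carry an address, a length, and a payload stream; the reader/ReadItem API shape shown here is an assumption:

// Hypothetical sketch of verifyReads(...): reads the log back from the beginning
// and checks that each entry matches the recorded write, in LogAddress order.
private void verifyReads(DurableDataLog log, TreeMap<LogAddress, byte[]> writeData) throws Exception {
    @Cleanup
    val reader = log.getReader();
    for (Map.Entry<LogAddress, byte[]> expected : writeData.entrySet()) {
        val readItem = reader.getNext();
        Assert.assertNotNull("Log was exhausted before all expected entries were read.", readItem);
        Assert.assertEquals("Entries were read out of order.", expected.getKey().getSequence(), readItem.getAddress().getSequence());

        // Read the full payload (InputStream.read may return fewer bytes than requested).
        byte[] actual = new byte[readItem.getLength()];
        int offset = 0;
        while (offset < actual.length) {
            int read = readItem.getPayload().read(actual, offset, actual.length - offset);
            Assert.assertTrue("Payload stream ended prematurely.", read >= 0);
            offset += read;
        }
        Assert.assertArrayEquals("Unexpected payload contents.", expected.getValue(), actual);
    }
    Assert.assertNull("Log contained more entries than were written.", reader.getNext());
}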