Use of io.pravega.segmentstore.server.logs.operations.MergeTransactionOperation in project pravega by pravega.
The class ContainerMetadataUpdateTransactionTests, method testCommit.
private void testCommit(UpdateableContainerMetadata baseMetadata, UpdateableContainerMetadata targetMetadata) throws Exception {
    // Create a few appends, merge a Transaction and seal the parent stream. Verify all changes have been applied
    // after a call to commit().
    int appendCount = 500;
    ArrayList<StorageOperation> operations = new ArrayList<>();
    for (int i = 0; i < appendCount; i++) {
        operations.add(createAppendNoOffset());
    }

    operations.add(createMerge());
    operations.add(createSeal());
    baseMetadata.getStreamSegmentMetadata(SEGMENT_ID).updateAttributes(Collections.singletonMap(Attributes.CREATION_TIME, 0L));
    val txn = createUpdateTransaction(baseMetadata);
    long expectedLastUsedParent = -1;
    long expectedLastUsedTransaction = -1;
    long seqNo = 0;
    for (StorageOperation op : operations) {
        txn.preProcessOperation(op);
        op.setSequenceNumber(++seqNo);
        txn.acceptOperation(op);
        if (op.getStreamSegmentId() == SEGMENT_ID) {
            expectedLastUsedParent = op.getSequenceNumber();
        }
        if (op instanceof MergeTransactionOperation) {
            expectedLastUsedParent = op.getSequenceNumber();
            expectedLastUsedTransaction = op.getSequenceNumber();
        }
    }

    txn.commit(targetMetadata);
    Assert.assertEquals("commit() seems to have modified the metadata sequence number while not in recovery mode.", ContainerMetadata.INITIAL_OPERATION_SEQUENCE_NUMBER, targetMetadata.nextOperationSequenceNumber() - 1);

    long expectedLength = SEGMENT_LENGTH + appendCount * DEFAULT_APPEND_DATA.length + SEALED_TRANSACTION_LENGTH;
    SegmentMetadata parentMetadata = targetMetadata.getStreamSegmentMetadata(SEGMENT_ID);
    Assert.assertEquals("Unexpected Length in metadata after commit.", expectedLength, parentMetadata.getLength());
    Assert.assertTrue("Unexpected value for isSealed in metadata after commit.", parentMetadata.isSealed());
    checkLastKnownSequenceNumber("Unexpected lastUsed for Parent after commit.", expectedLastUsedParent, parentMetadata);

    SegmentMetadata transactionMetadata = targetMetadata.getStreamSegmentMetadata(SEALED_TRANSACTION_ID);
    Assert.assertTrue("Unexpected value for isSealed in Transaction metadata after commit.", transactionMetadata.isSealed());
    Assert.assertTrue("Unexpected value for isMerged in Transaction metadata after commit.", transactionMetadata.isMerged());
    Assert.assertEquals("Unexpected number of attributes for parent segment.", 1, parentMetadata.getAttributes().size());
    Assert.assertEquals("Unexpected number of attributes for transaction.", 0, transactionMetadata.getAttributes().size());
    checkLastKnownSequenceNumber("Unexpected lastUsed for Transaction after commit.", expectedLastUsedTransaction, transactionMetadata);
}
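The createMerge() helper referenced above is not shown on this page. The following is a minimal, hypothetical sketch, assuming it only constructs the operation from the test's fixture constants and that txn.preProcessOperation() later fills in the merge length and target offset:

// Hypothetical sketch of createMerge(); SEGMENT_ID and SEALED_TRANSACTION_ID are the
// fixture constants already used in the test above.
private StorageOperation createMerge() {
    // Length and StreamSegmentOffset are assumed to be assigned by preProcessOperation() in the loop above.
    return new MergeTransactionOperation(SEGMENT_ID, SEALED_TRANSACTION_ID);
}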
Use of io.pravega.segmentstore.server.logs.operations.MergeTransactionOperation in project pravega by pravega.
The class SegmentAggregatorTests, method generateMergeTransactionAndUpdateMetadata.
private StorageOperation generateMergeTransactionAndUpdateMetadata(long transactionId, TestContext context) {
    UpdateableSegmentMetadata transactionMetadata = context.containerMetadata.getStreamSegmentMetadata(transactionId);
    UpdateableSegmentMetadata parentMetadata = context.containerMetadata.getStreamSegmentMetadata(transactionMetadata.getParentId());

    // The Transaction is merged at the current end of the parent segment.
    MergeTransactionOperation op = new MergeTransactionOperation(parentMetadata.getId(), transactionMetadata.getId());
    op.setLength(transactionMetadata.getLength());
    op.setStreamSegmentOffset(parentMetadata.getLength());

    // Reflect the merge in the metadata: the parent grows by the Transaction's length and the Transaction is marked as merged.
    parentMetadata.setLength(parentMetadata.getLength() + transactionMetadata.getLength());
    transactionMetadata.markMerged();
    return op;
}
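A hedged usage sketch for this helper: once the Transaction has been sealed, the generated operation is added to the parent's SegmentAggregator, and a subsequent flush is expected to perform the actual concatenation in Storage (the exact call sites in SegmentAggregatorTests may differ):

// Assumed call pattern inside a test method declared with "throws Exception".
StorageOperation mergeOp = generateMergeTransactionAndUpdateMetadata(transactionId, context);
context.segmentAggregator.add(mergeOp);          // queue the merge on the parent's aggregator
context.segmentAggregator.flush(TIMEOUT).join(); // flushing is assumed to concatenate the Transaction into the parent in Storage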
Use of io.pravega.segmentstore.server.logs.operations.MergeTransactionOperation in project pravega by pravega.
The class SegmentAggregatorTests, method testAddWithBadInput.
/**
 * Tests the add() method with invalid arguments.
 */
@Test
public void testAddWithBadInput() throws Exception {
    final long badTransactionId = 12345;
    final long badParentId = 56789;
    final String badParentName = "Foo_Parent";
    final String badTransactionName = "Foo_Transaction";
    @Cleanup
    TestContext context = new TestContext(DEFAULT_CONFIG);

    // We only need one Transaction for this test.
    SegmentAggregator transactionAggregator = context.transactionAggregators[0];
    SegmentMetadata transactionMetadata = transactionAggregator.getMetadata();
    context.storage.create(context.segmentAggregator.getMetadata().getName(), TIMEOUT).join();
    context.storage.create(transactionMetadata.getName(), TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    transactionAggregator.initialize(TIMEOUT).join();

    // Create 2 more segments that can be used to verify MergeTransactionOperation.
    context.containerMetadata.mapStreamSegmentId(badParentName, badParentId);
    UpdateableSegmentMetadata badTransactionMetadata = context.containerMetadata.mapStreamSegmentId(badTransactionName, badTransactionId, badParentId);
    badTransactionMetadata.setLength(0);
    badTransactionMetadata.setStorageLength(0);
    context.storage.create(badTransactionMetadata.getName(), TIMEOUT).join();

    // 1. MergeTransactionOperation.
    // 1a. Verify that MergeTransactionOperation cannot be added to the Transaction segment.
    AssertExtensions.assertThrows("add() allowed a MergeTransactionOperation on the Transaction segment.", () -> transactionAggregator.add(generateSimpleMergeTransaction(transactionMetadata.getId(), context)), ex -> ex instanceof IllegalArgumentException);

    // 1b. Verify that MergeTransactionOperation has the right parent.
    AssertExtensions.assertThrows("add() allowed a MergeTransactionOperation on the parent for a Transaction that did not have it as a parent.", () -> transactionAggregator.add(generateSimpleMergeTransaction(badTransactionId, context)), ex -> ex instanceof IllegalArgumentException);

    // 2. StreamSegmentSealOperation.
    // 2a. Verify we cannot add a StreamSegmentSealOperation if the segment is not sealed yet.
    AssertExtensions.assertThrows("add() allowed a StreamSegmentSealOperation for a non-sealed segment.", () -> {
        @Cleanup
        SegmentAggregator badTransactionAggregator = new SegmentAggregator(badTransactionMetadata, context.dataSource, context.storage, DEFAULT_CONFIG, context.timer, executorService());
        badTransactionAggregator.initialize(TIMEOUT).join();
        badTransactionAggregator.add(generateSimpleSeal(badTransactionId, context));
    }, ex -> ex instanceof DataCorruptionException);

    // 2b. Verify that nothing is allowed after Seal (after adding one append to and sealing the Transaction Segment).
    StorageOperation transactionAppend1 = generateAppendAndUpdateMetadata(0, transactionMetadata.getId(), context);
    transactionAggregator.add(transactionAppend1);
    transactionAggregator.add(generateSealAndUpdateMetadata(transactionMetadata.getId(), context));
    AssertExtensions.assertThrows("add() allowed operation after seal.", () -> transactionAggregator.add(generateSimpleAppend(transactionMetadata.getId(), context)), ex -> ex instanceof DataCorruptionException);
    // 3. CachedStreamSegmentAppendOperation.
    final StorageOperation parentAppend1 = generateAppendAndUpdateMetadata(0, SEGMENT_ID, context);

    // 3a. Verify we cannot add StreamSegmentAppendOperations.
    AssertExtensions.assertThrows("add() allowed a StreamSegmentAppendOperation.", () -> {
        // We have the correct offset, but we did not increase the Length.
        StreamSegmentAppendOperation badAppend = new StreamSegmentAppendOperation(parentAppend1.getStreamSegmentId(), parentAppend1.getStreamSegmentOffset(), new byte[(int) parentAppend1.getLength()], null);
        context.segmentAggregator.add(badAppend);
    }, ex -> ex instanceof IllegalArgumentException);

    // Add this one append to the parent (nothing unusual here); we'll use this for the next tests.
    context.segmentAggregator.add(parentAppend1);

    // 3b. Verify we cannot add anything beyond the DurableLogOffset (offset or offset+length).
    AssertExtensions.assertThrows("add() allowed an operation beyond the DurableLogOffset (offset).", () -> {
        // We have the correct offset, but we did not increase the Length.
        StreamSegmentAppendOperation badAppend = new StreamSegmentAppendOperation(context.segmentAggregator.getMetadata().getId(), "foo".getBytes(), null);
        badAppend.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength());
        context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badAppend));
    }, ex -> ex instanceof DataCorruptionException);

    ((UpdateableSegmentMetadata) context.segmentAggregator.getMetadata()).setLength(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength() + 1);
    AssertExtensions.assertThrows("add() allowed an operation beyond the DurableLogOffset (offset+length).", () -> {
        // We have the correct offset, but the append extends beyond the segment's Length.
        StreamSegmentAppendOperation badAppend = new StreamSegmentAppendOperation(context.segmentAggregator.getMetadata().getId(), "foo".getBytes(), null);
        badAppend.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength());
        context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badAppend));
    }, ex -> ex instanceof DataCorruptionException);
    // 3c. Verify contiguity (offsets - we cannot have gaps in the data).
    AssertExtensions.assertThrows("add() allowed an operation with wrong offset (too small).", () -> {
        StreamSegmentAppendOperation badOffsetAppend = new StreamSegmentAppendOperation(context.segmentAggregator.getMetadata().getId(), "foo".getBytes(), null);
        badOffsetAppend.setStreamSegmentOffset(0);
        context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badOffsetAppend));
    }, ex -> ex instanceof DataCorruptionException);

    AssertExtensions.assertThrows("add() allowed an operation with wrong offset (too large).", () -> {
        StreamSegmentAppendOperation badOffsetAppend = new StreamSegmentAppendOperation(context.segmentAggregator.getMetadata().getId(), "foo".getBytes(), null);
        badOffsetAppend.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength() + 1);
        context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badOffsetAppend));
    }, ex -> ex instanceof DataCorruptionException);

    AssertExtensions.assertThrows("add() allowed an operation with wrong offset (too large, but no pending operations).", () -> {
        @Cleanup
        SegmentAggregator badTransactionAggregator = new SegmentAggregator(badTransactionMetadata, context.dataSource, context.storage, DEFAULT_CONFIG, context.timer, executorService());
        badTransactionMetadata.setLength(100);
        badTransactionAggregator.initialize(TIMEOUT).join();
        StreamSegmentAppendOperation badOffsetAppend = new StreamSegmentAppendOperation(context.segmentAggregator.getMetadata().getId(), "foo".getBytes(), null);
        badOffsetAppend.setStreamSegmentOffset(1);
        context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badOffsetAppend));
    }, ex -> ex instanceof DataCorruptionException);

    // 4. Verify Segment Id match.
    AssertExtensions.assertThrows("add() allowed an Append operation with wrong Segment Id.", () -> {
        StreamSegmentAppendOperation badIdAppend = new StreamSegmentAppendOperation(Integer.MAX_VALUE, "foo".getBytes(), null);
        badIdAppend.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength());
        context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(badIdAppend));
    }, ex -> ex instanceof IllegalArgumentException);

    AssertExtensions.assertThrows("add() allowed a StreamSegmentSealOperation with wrong SegmentId.", () -> {
        StreamSegmentSealOperation badIdSeal = new StreamSegmentSealOperation(Integer.MAX_VALUE);
        badIdSeal.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength());
        context.segmentAggregator.add(badIdSeal);
    }, ex -> ex instanceof IllegalArgumentException);

    AssertExtensions.assertThrows("add() allowed a MergeTransactionOperation with wrong SegmentId.", () -> {
        MergeTransactionOperation badIdMerge = new MergeTransactionOperation(Integer.MAX_VALUE, transactionMetadata.getId());
        badIdMerge.setStreamSegmentOffset(parentAppend1.getStreamSegmentOffset() + parentAppend1.getLength());
        badIdMerge.setLength(1);
        context.segmentAggregator.add(badIdMerge);
    }, ex -> ex instanceof IllegalArgumentException);

    // 5. Truncations.
    AssertExtensions.assertThrows("add() allowed a StreamSegmentTruncateOperation with a truncation offset beyond the one in the metadata.", () -> {
        StreamSegmentTruncateOperation op = new StreamSegmentTruncateOperation(SEGMENT_ID, 10);
        op.setSequenceNumber(context.containerMetadata.nextOperationSequenceNumber());
        context.segmentAggregator.add(op);
    }, ex -> ex instanceof DataCorruptionException);
}
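For context, the generateSimpleMergeTransaction(transactionId, context) helper used in step 1 is not shown on this page. A minimal sketch, assuming it mirrors generateMergeTransactionAndUpdateMetadata above but leaves the metadata untouched:

// Hypothetical sketch of generateSimpleMergeTransaction(); it builds the merge operation
// from the current metadata without updating it (assumption).
private StorageOperation generateSimpleMergeTransaction(long transactionId, TestContext context) {
    UpdateableSegmentMetadata transactionMetadata = context.containerMetadata.getStreamSegmentMetadata(transactionId);
    UpdateableSegmentMetadata parentMetadata = context.containerMetadata.getStreamSegmentMetadata(transactionMetadata.getParentId());
    MergeTransactionOperation op = new MergeTransactionOperation(parentMetadata.getId(), transactionMetadata.getId());
    op.setLength(transactionMetadata.getLength());
    op.setStreamSegmentOffset(parentMetadata.getLength());
    return op;
}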
Use of io.pravega.segmentstore.server.logs.operations.MergeTransactionOperation in project pravega by pravega.
The class StorageWriterTests, method mergeTransactions.
private void mergeTransactions(Iterable<Long> transactionIds, HashMap<Long, ByteArrayOutputStream> segmentContents, TestContext context) {
    for (long transactionId : transactionIds) {
        UpdateableSegmentMetadata transactionMetadata = context.metadata.getStreamSegmentMetadata(transactionId);
        UpdateableSegmentMetadata parentMetadata = context.metadata.getStreamSegmentMetadata(transactionMetadata.getParentId());
        Assert.assertFalse("Transaction already merged", transactionMetadata.isMerged());
        Assert.assertTrue("Transaction not sealed prior to merger", transactionMetadata.isSealed());
        Assert.assertFalse("Parent is already sealed", parentMetadata.isSealed());

        // Create the Merge Op.
        MergeTransactionOperation op = new MergeTransactionOperation(parentMetadata.getId(), transactionMetadata.getId());
        op.setLength(transactionMetadata.getLength());
        op.setStreamSegmentOffset(parentMetadata.getLength());

        // Update metadata.
        parentMetadata.setLength(parentMetadata.getLength() + transactionMetadata.getLength());
        transactionMetadata.markMerged();

        // Process the merge op.
        context.dataSource.add(op);

        try {
            segmentContents.get(parentMetadata.getId()).write(segmentContents.get(transactionMetadata.getId()).toByteArray());
        } catch (IOException ex) {
            throw new AssertionError(ex);
        }
        segmentContents.remove(transactionId);
    }
}
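Note that this helper only updates the in-memory metadata and queues the MergeTransactionOperation on the test data source; the StorageWriter under test is expected to perform the actual concatenation in Storage later. A hedged usage sketch (transactionIds and segmentContents come from earlier setup steps not shown here):

// Assumed call pattern inside a StorageWriterTests test method.
mergeTransactions(transactionIds, segmentContents, context);
// After the StorageWriter drains the queued operations, each parent segment in Storage
// should match the concatenated byte contents tracked in segmentContents.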
Use of io.pravega.segmentstore.server.logs.operations.MergeTransactionOperation in project pravega by pravega.
The class StreamSegmentContainer, method mergeTransaction.
@Override
public CompletableFuture<Void> mergeTransaction(String transactionName, Duration timeout) {
    ensureRunning();
    logRequest("mergeTransaction", transactionName);
    this.metrics.mergeTxn();
    TimeoutTimer timer = new TimeoutTimer(timeout);
    return this.segmentMapper.getOrAssignStreamSegmentId(transactionName, timer.getRemaining(), transactionId -> {
        SegmentMetadata transactionMetadata = this.metadata.getStreamSegmentMetadata(transactionId);
        if (transactionMetadata == null) {
            throw new CompletionException(new StreamSegmentNotExistsException(transactionName));
        }

        Operation op = new MergeTransactionOperation(transactionMetadata.getParentId(), transactionMetadata.getId());
        return this.durableLog.add(op, timer.getRemaining());
    }).thenComposeAsync(v -> this.stateStore.remove(transactionName, timer.getRemaining()), this.executor);
}
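Finally, a hedged caller-side example of the API shown above; the container reference, the Transaction segment name and the timeout value are made up for illustration:

// Hypothetical caller; "container" is a running StreamSegmentContainer and
// "myTransactionSegmentName" stands in for a real Transaction segment name.
CompletableFuture<Void> merge = container.mergeTransaction("myTransactionSegmentName", Duration.ofSeconds(30));
// Completes once the MergeTransactionOperation has been added to the DurableLog and the
// Transaction's state entry has been removed, per the code above.
merge.join();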