Use of io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation in project pravega by pravega.
The class SegmentAggregatorTests, method generateSimpleAppend.
private StorageOperation generateSimpleAppend(long segmentId, TestContext context) {
    byte[] data = "Append_Dummy".getBytes();
    UpdateableSegmentMetadata segmentMetadata = context.containerMetadata.getStreamSegmentMetadata(segmentId);
    long offset = segmentMetadata.getLength();
    StreamSegmentAppendOperation op = new StreamSegmentAppendOperation(segmentId, data, null);
    op.setStreamSegmentOffset(offset);
    return op;
}
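The third constructor argument (null here) is the optional collection of attribute updates, as the testMaxAttributeLimit example further down shows. Purely as a hedged illustration, a hypothetical variant of this helper that attaches a single attribute to the append might look like the sketch below; the method itself is not part of the test suite.

// Hypothetical sketch, not part of SegmentAggregatorTests: the same append, but carrying one attribute update.
private StorageOperation generateAppendWithAttribute(long segmentId, TestContext context) {
    byte[] data = "Append_Dummy".getBytes();
    UpdateableSegmentMetadata segmentMetadata = context.containerMetadata.getStreamSegmentMetadata(segmentId);
    StreamSegmentAppendOperation op = new StreamSegmentAppendOperation(segmentId, data,
            Collections.singleton(new AttributeUpdate(UUID.randomUUID(), AttributeUpdateType.Replace, 1L)));
    op.setStreamSegmentOffset(segmentMetadata.getLength());
    return op;
}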
Use of io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation in project pravega by pravega.
The class SegmentAggregatorTests, method generateAppendAndUpdateMetadata.
private StorageOperation generateAppendAndUpdateMetadata(long segmentId, byte[] data, TestContext context) {
    UpdateableSegmentMetadata segmentMetadata = context.containerMetadata.getStreamSegmentMetadata(segmentId);
    long offset = segmentMetadata.getLength();
    segmentMetadata.setLength(offset + data.length);
    StreamSegmentAppendOperation op = new StreamSegmentAppendOperation(segmentId, data, null);
    op.setStreamSegmentOffset(offset);
    op.setSequenceNumber(context.containerMetadata.nextOperationSequenceNumber());
    context.dataSource.recordAppend(op);
    return new CachedStreamSegmentAppendOperation(op);
}
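Unlike generateSimpleAppend above, this helper advances the segment's metadata length and assigns a sequence number before wrapping the append in a CachedStreamSegmentAppendOperation, so successive calls naturally produce contiguous offsets. A minimal hedged usage sketch follows; the loop and variable names are assumptions, not taken from the tests.

// Sketch: three back-to-back appends. Each one starts where the previous one ended,
// because generateAppendAndUpdateMetadata bumps the segment metadata's length on every call
// (generateSimpleAppend, by contrast, would return the same offset twice in a row).
byte[] chunk = new byte[100];
for (int i = 0; i < 3; i++) {
    StorageOperation append = generateAppendAndUpdateMetadata(segmentId, chunk, context);
    // append.getStreamSegmentOffset() == i * chunk.length for a segment that started empty.
}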
Use of io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation in project pravega by pravega.
The class StorageWriterTests, method testCleanup.
/**
 * Tests the ability of the StorageWriter to clean up SegmentAggregators whose segments have been deleted in Storage
 * or are gone from the Metadata.
 * 1. Creates 3 segments and adds an append for each of them.
 * 2. Marks segment 2 as deleted (in metadata) and evicts segment 3 from metadata (no deletion).
 * 3. Runs one more Writer cycle (to clean up).
 * 4. Reinstates the missing segment metadata and adds appends for each of them, verifying that the Writer re-requests
 * the metadata for those two.
 */
@Test
public void testCleanup() throws Exception {
    final WriterConfig config = WriterConfig.builder()
            .with(WriterConfig.FLUSH_THRESHOLD_BYTES, 1) // This differs from DEFAULT_CONFIG.
            .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
            .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
            .with(WriterConfig.MAX_READ_TIMEOUT_MILLIS, 250L)
            .with(WriterConfig.MAX_ITEMS_TO_READ_AT_ONCE, 100)
            .with(WriterConfig.ERROR_SLEEP_MILLIS, 0L)
            .build();
    @Cleanup TestContext context = new TestContext(config);
    context.writer.startAsync();
    // Create a bunch of segments.
    final ArrayList<Long> segmentIds = createSegments(context);
    final UpdateableSegmentMetadata segment1 = context.metadata.getStreamSegmentMetadata(segmentIds.get(0));
    final UpdateableSegmentMetadata segment2 = context.metadata.getStreamSegmentMetadata(segmentIds.get(1));
    final UpdateableSegmentMetadata segment3 = context.metadata.getStreamSegmentMetadata(segmentIds.get(2));
    final byte[] data = new byte[1];
    Function<UpdateableSegmentMetadata, Operation> createAppend = segment -> {
        StreamSegmentAppendOperation append = new StreamSegmentAppendOperation(segment.getId(), data, null);
        append.setStreamSegmentOffset(segment.getLength());
        context.dataSource.recordAppend(append);
        segment.setLength(segment.getLength() + data.length);
        return new CachedStreamSegmentAppendOperation(append);
    };
    // Process an append for each segment, to make sure the writer has knowledge of those segments.
    context.dataSource.add(createAppend.apply(segment1));
    context.dataSource.add(createAppend.apply(segment2));
    context.dataSource.add(createAppend.apply(segment3));
    metadataCheckpoint(context);
    context.dataSource.waitFullyAcked().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // Delete segment2 (markDeleted) and evict segment3 (by forcing the metadata to forget about it).
    long evictionCutoff = context.metadata.nextOperationSequenceNumber() + 1;
    context.metadata.getStreamSegmentId(segment1.getName(), true);
    context.metadata.getStreamSegmentId(segment2.getName(), true);
    segment2.markDeleted();
    Collection<Long> evictedSegments = evictSegments(evictionCutoff, context);
    // Make sure the right segment is evicted, and not the other two (there are other segments in this system which we don't care about).
    Assert.assertTrue("Expected segment was not evicted.", evictedSegments.contains(segment3.getId()));
    Assert.assertFalse("Unexpected segments were evicted.",
            evictedSegments.contains(segment1.getId()) || evictedSegments.contains(segment2.getId()));
    // Add one more append to Segment1 - this will force the writer to go on a full iteration and thus invoke cleanup.
    context.dataSource.add(createAppend.apply(segment1));
    metadataCheckpoint(context);
    context.dataSource.waitFullyAcked().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // Get rid of Segment2 from the metadata.
    evictionCutoff = context.metadata.nextOperationSequenceNumber() + 1;
    context.metadata.getStreamSegmentId(segment1.getName(), true);
    evictedSegments = evictSegments(evictionCutoff, context);
    Assert.assertTrue("Expected segment was not evicted.", evictedSegments.contains(segment2.getId()));
    // Repopulate the metadata.
    val segment2Take2 = context.metadata.mapStreamSegmentId(segment2.getName(), segment2.getId());
    val segment3Take2 = context.metadata.mapStreamSegmentId(segment3.getName(), segment3.getId());
    segment2Take2.copyFrom(segment2);
    segment3Take2.copyFrom(segment3);
    // Add an append for each of the re-added segments and verify that the Writer re-requested the metadata, which
    // indicates it had to recreate their SegmentAggregators.
    HashSet<Long> requestedSegmentIds = new HashSet<>();
    context.dataSource.setSegmentMetadataRequested(requestedSegmentIds::add);
    context.dataSource.add(createAppend.apply(segment2Take2));
    context.dataSource.add(createAppend.apply(segment3Take2));
    metadataCheckpoint(context);
    context.dataSource.waitFullyAcked().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertTrue("The re-added segments did not have their metadata requested.",
            requestedSegmentIds.contains(segment2.getId()) && requestedSegmentIds.contains(segment3.getId()));
}
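Note the getStreamSegmentId(name, true) calls made just before each eviction pass: they are not needed for their return value; passing true refreshes the named segment's last-used position so it stays above the cutoff and survives eviction. A hedged sketch of that idiom in isolation follows; the keepSegment variable and the exact meaning of the boolean flag are assumptions based on how this test uses it.

// Sketch: "touch" a segment so the next eviction pass skips it, then evict everything
// not used since the cutoff.
long evictionCutoff = context.metadata.nextOperationSequenceNumber() + 1;
context.metadata.getStreamSegmentId(keepSegment.getName(), true); // assumed to update the segment's last-used marker.
Collection<Long> evicted = evictSegments(evictionCutoff, context);
Assert.assertFalse(evicted.contains(keepSegment.getId()));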
Use of io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation in project pravega by pravega.
The class ContainerMetadataUpdateTransactionTests, method testPreProcessStreamSegmentAppend.
// region StreamSegmentAppendOperation
/**
* Tests the preProcess method with StreamSegmentAppend operations.
* Scenarios:
* * Recovery Mode
* * Non-recovery mode
* * StreamSegment is Merged (both in-transaction and in-metadata)
* * StreamSegment is Sealed (both in-transaction and in-metadata)
*/
@Test
public void testPreProcessStreamSegmentAppend() throws Exception {
    val metadata = createMetadata();
    StreamSegmentAppendOperation appendOp = createAppendNoOffset();
    // When everything is OK (in recovery mode) - nothing should change.
    metadata.enterRecoveryMode();
    val txn1 = createUpdateTransaction(metadata);
    txn1.preProcessOperation(appendOp);
    AssertExtensions.assertLessThan("Unexpected StreamSegmentOffset after call to preProcess in recovery mode.", 0, appendOp.getStreamSegmentOffset());
    checkNoSequenceNumberAssigned(appendOp, "call to preProcess in recovery mode");
    Assert.assertEquals("preProcess(Append) seems to have changed the Updater internal state in recovery mode.", SEGMENT_LENGTH, txn1.getStreamSegmentMetadata(SEGMENT_ID).getLength());
    Assert.assertEquals("preProcess(Append) seems to have changed the metadata in recovery mode.", SEGMENT_LENGTH, metadata.getStreamSegmentMetadata(SEGMENT_ID).getLength());
    // When everything is OK (no recovery mode).
    metadata.exitRecoveryMode();
    val txn2 = createUpdateTransaction(metadata);
    txn2.preProcessOperation(appendOp);
    Assert.assertEquals("Unexpected StreamSegmentOffset after call to preProcess in non-recovery mode.", SEGMENT_LENGTH, appendOp.getStreamSegmentOffset());
    checkNoSequenceNumberAssigned(appendOp, "call to preProcess in non-recovery mode");
    Assert.assertEquals("preProcess(Append) seems to have changed the Updater internal state.", SEGMENT_LENGTH, txn2.getStreamSegmentMetadata(SEGMENT_ID).getLength());
    Assert.assertEquals("preProcess(Append) seems to have changed the metadata.", SEGMENT_LENGTH, metadata.getStreamSegmentMetadata(SEGMENT_ID).getLength());
    // When StreamSegment is merged (via transaction).
    StreamSegmentAppendOperation transactionAppendOp = new StreamSegmentAppendOperation(SEALED_TRANSACTION_ID, DEFAULT_APPEND_DATA, null);
    MergeTransactionOperation mergeOp = createMerge();
    txn2.preProcessOperation(mergeOp);
    txn2.acceptOperation(mergeOp);
    Assert.assertFalse("Transaction should not be merged in metadata (yet).", metadata.getStreamSegmentMetadata(SEALED_TRANSACTION_ID).isMerged());
    AssertExtensions.assertThrows("Unexpected behavior for preProcess(Append) when Segment is merged (in transaction).",
            () -> txn2.preProcessOperation(transactionAppendOp),
            ex -> ex instanceof StreamSegmentMergedException);
    // When StreamSegment is merged (via metadata).
    txn2.commit(metadata);
    Assert.assertTrue("Transaction should have been merged in metadata.", metadata.getStreamSegmentMetadata(SEALED_TRANSACTION_ID).isMerged());
    AssertExtensions.assertThrows("Unexpected behavior for preProcess(Append) when Segment is merged (in metadata).",
            () -> txn2.preProcessOperation(transactionAppendOp),
            ex -> ex instanceof StreamSegmentMergedException);
    // When StreamSegment is sealed (via transaction).
    StreamSegmentSealOperation sealOp = createSeal();
    txn2.preProcessOperation(sealOp);
    txn2.acceptOperation(sealOp);
    Assert.assertFalse("StreamSegment should not be sealed in metadata (yet).", metadata.getStreamSegmentMetadata(SEGMENT_ID).isSealed());
    AssertExtensions.assertThrows("Unexpected behavior for preProcess(Append) when Segment is sealed (in transaction).",
            () -> txn2.preProcessOperation(createAppendNoOffset()),
            ex -> ex instanceof StreamSegmentSealedException);
    // When StreamSegment is sealed (via metadata).
    txn2.commit(metadata);
    Assert.assertTrue("StreamSegment should have been sealed in metadata.", metadata.getStreamSegmentMetadata(SEGMENT_ID).isSealed());
    AssertExtensions.assertThrows("Unexpected behavior for preProcess(Append) when Segment is sealed (in metadata).",
            () -> txn2.preProcessOperation(createAppendNoOffset()),
            ex -> ex instanceof StreamSegmentSealedException);
}
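The calls above follow the update transaction's three-step lifecycle: preProcessOperation validates the operation and assigns its offset, acceptOperation applies the change to the transaction only, and commit pushes it into the container metadata. A condensed, hedged sketch of the happy path for a single append follows; the comments reflect the behavior observed in this test rather than guaranteed semantics.

// Sketch: one append flowing through the update-transaction lifecycle exercised above.
val metadata = createMetadata();
val txn = createUpdateTransaction(metadata);
StreamSegmentAppendOperation append = createAppendNoOffset();
txn.preProcessOperation(append);   // validates (sealed/merged checks) and assigns the segment offset.
txn.acceptOperation(append);       // the length change is visible in txn, but not yet in metadata.
txn.commit(metadata);              // the length change becomes visible in the container metadata.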
Use of io.pravega.segmentstore.server.logs.operations.StreamSegmentAppendOperation in project pravega by pravega.
The class ContainerMetadataUpdateTransactionTests, method testMaxAttributeLimit.
/**
* Tests the ability of the ContainerMetadataUpdateTransaction to enforce the maximum attribute limit on Segments.
*/
@Test
public void testMaxAttributeLimit() throws Exception {
    // We check all operations that can update attributes.
    val ops = new HashMap<String, Function<Collection<AttributeUpdate>, Operation>>();
    ops.put("UpdateAttributes", u -> new UpdateAttributesOperation(SEGMENT_ID, u));
    ops.put("Append", u -> new StreamSegmentAppendOperation(SEGMENT_ID, DEFAULT_APPEND_DATA, u));
    // Set the maximum allowed number of attributes on a segment.
    UpdateableContainerMetadata metadata = createMetadata();
    val initialUpdates = new ArrayList<AttributeUpdate>(SegmentMetadata.MAXIMUM_ATTRIBUTE_COUNT);
    val expectedValues = new HashMap<UUID, Long>();
    for (int i = 0; i < SegmentMetadata.MAXIMUM_ATTRIBUTE_COUNT; i++) {
        UUID attributeId;
        do {
            attributeId = UUID.randomUUID();
        } while (expectedValues.containsKey(attributeId));
        initialUpdates.add(new AttributeUpdate(attributeId, AttributeUpdateType.None, i));
        expectedValues.put(attributeId, (long) i);
    }
    // And load them up into an UpdateTransaction.
    val txn = createUpdateTransaction(metadata);
    val initialOp = new UpdateAttributesOperation(SEGMENT_ID, initialUpdates);
    txn.preProcessOperation(initialOp);
    txn.acceptOperation(initialOp);
    // Each case below only invokes preProcessOperation() - which is responsible for validation - so no changes are made to the UpdateTransaction.
    for (val opGenerator : ops.entrySet()) {
        // Value replacement.
        val replacementUpdates = new ArrayList<AttributeUpdate>();
        int i = 0;
        for (val e : expectedValues.entrySet()) {
            AttributeUpdate u;
            switch ((i++) % 4) {
                case 0:
                    u = new AttributeUpdate(e.getKey(), AttributeUpdateType.ReplaceIfEquals, e.getValue() + 1, e.getValue());
                    break;
                case 1:
                    u = new AttributeUpdate(e.getKey(), AttributeUpdateType.ReplaceIfGreater, e.getValue() + 1);
                    break;
                case 2:
                    u = new AttributeUpdate(e.getKey(), AttributeUpdateType.Accumulate, 1);
                    break;
                default:
                    u = new AttributeUpdate(e.getKey(), AttributeUpdateType.Replace, 1);
                    break;
            }
            replacementUpdates.add(u);
        }
        // This should not throw anything.
        txn.preProcessOperation(opGenerator.getValue().apply(replacementUpdates));
        // Removal - this should not throw anything either.
        val toRemoveId = initialUpdates.get(0).getAttributeId();
        val toRemoveUpdate = new AttributeUpdate(toRemoveId, AttributeUpdateType.Replace, SegmentMetadata.NULL_ATTRIBUTE_VALUE);
        txn.preProcessOperation(opGenerator.getValue().apply(Collections.singleton(toRemoveUpdate)));
        // Addition - this should throw.
        UUID toAddId;
        do {
            toAddId = UUID.randomUUID();
        } while (expectedValues.containsKey(toAddId));
        val toAddUpdate = new AttributeUpdate(toAddId, AttributeUpdateType.None, 1);
        AssertExtensions.assertThrows("Too many attributes were accepted for operation " + opGenerator.getKey(),
                () -> txn.preProcessOperation(opGenerator.getValue().apply(Collections.singleton(toAddUpdate))),
                ex -> ex instanceof TooManyAttributesException);
        // Removal+Addition+Replacement: this particular setup should not throw anything.
        val mixedUpdates = Arrays.asList(
                new AttributeUpdate(toAddId, AttributeUpdateType.None, 1),
                new AttributeUpdate(toRemoveId, AttributeUpdateType.Replace, SegmentMetadata.NULL_ATTRIBUTE_VALUE),
                new AttributeUpdate(initialUpdates.get(1).getAttributeId(), AttributeUpdateType.Replace, 10));
        txn.preProcessOperation(opGenerator.getValue().apply(mixedUpdates));
    }
}
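The removal cases above lean on the convention that writing SegmentMetadata.NULL_ATTRIBUTE_VALUE counts as deleting an attribute, which is why a removal paired with an addition still fits under MAXIMUM_ATTRIBUTE_COUNT. A minimal hedged sketch of that exchange outside the loop follows; oldId and newId are placeholder variables, not taken from the test.

// Sketch: swap one attribute for another without exceeding MAXIMUM_ATTRIBUTE_COUNT.
val swap = Arrays.asList(
        new AttributeUpdate(oldId, AttributeUpdateType.Replace, SegmentMetadata.NULL_ATTRIBUTE_VALUE), // remove an existing attribute
        new AttributeUpdate(newId, AttributeUpdateType.None, 1));                                       // add a new one in its place
txn.preProcessOperation(new UpdateAttributesOperation(SEGMENT_ID, swap)); // expected to validate without TooManyAttributesException.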