use of io.pravega.segmentstore.server.UpdateableSegmentMetadata in project pravega by pravega.
the class StorageWriterTests method testCleanup.
/**
* Tests the ability of the StorageWriter to cleanup SegmentAggregators that have been deleted in Storage or are
* gone from the Metadata.
* 1. Creates 3 segments, and adds an append for each of them.
* 2. Marks segment 2 as deleted (in metadata) and evicts segment 3 from metadata (no deletion).
* 3. Runs one more Writer cycle (to clean up).
* 4. Reinstates the missing segment metadatas and adds appends for each of them, verifying that the Writer re-requests
* the metadata for those two.
*/
@Test
public void testCleanup() throws Exception {
final WriterConfig config = WriterConfig.builder()
        .with(WriterConfig.FLUSH_THRESHOLD_BYTES, 1) // This differs from DEFAULT_CONFIG.
        .with(WriterConfig.FLUSH_THRESHOLD_MILLIS, 1000L)
        .with(WriterConfig.MIN_READ_TIMEOUT_MILLIS, 10L)
        .with(WriterConfig.MAX_READ_TIMEOUT_MILLIS, 250L)
        .with(WriterConfig.MAX_ITEMS_TO_READ_AT_ONCE, 100)
        .with(WriterConfig.ERROR_SLEEP_MILLIS, 0L)
        .build();
@Cleanup TestContext context = new TestContext(config);
context.writer.startAsync();
// Create a bunch of segments and Transaction.
final ArrayList<Long> segmentIds = createSegments(context);
final UpdateableSegmentMetadata segment1 = context.metadata.getStreamSegmentMetadata(segmentIds.get(0));
final UpdateableSegmentMetadata segment2 = context.metadata.getStreamSegmentMetadata(segmentIds.get(1));
final UpdateableSegmentMetadata segment3 = context.metadata.getStreamSegmentMetadata(segmentIds.get(2));
final byte[] data = new byte[1];
Function<UpdateableSegmentMetadata, Operation> createAppend = segment -> {
    StreamSegmentAppendOperation append = new StreamSegmentAppendOperation(segment.getId(), data, null);
    append.setStreamSegmentOffset(segment.getLength());
    context.dataSource.recordAppend(append);
    segment.setLength(segment.getLength() + data.length);
    return new CachedStreamSegmentAppendOperation(append);
};
// Process an append for each segment, to make sure the writer has knowledge of those segments.
context.dataSource.add(createAppend.apply(segment1));
context.dataSource.add(createAppend.apply(segment2));
context.dataSource.add(createAppend.apply(segment3));
metadataCheckpoint(context);
context.dataSource.waitFullyAcked().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
// Delete segment2 (markDeleted) and evict segment3 (by forcing the metadata to forget about it).
long evictionCutoff = context.metadata.nextOperationSequenceNumber() + 1;
context.metadata.getStreamSegmentId(segment1.getName(), true);
context.metadata.getStreamSegmentId(segment2.getName(), true);
segment2.markDeleted();
Collection<Long> evictedSegments = evictSegments(evictionCutoff, context);
// Make sure the right segment is evicted, and not the other two (there are other segments in this system which we don't care about).
Assert.assertTrue("Expected segment was not evicted.", evictedSegments.contains(segment3.getId()));
Assert.assertFalse("Unexpected segments were evicted.", evictedSegments.contains(segment1.getId()) || evictedSegments.contains(segment2.getId()));
// Add one more append to Segment1 - this will force the writer to go on a full iteration and thus invoke cleanup.
context.dataSource.add(createAppend.apply(segment1));
metadataCheckpoint(context);
context.dataSource.waitFullyAcked().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
// Get rid of Segment2 from the metadata.
evictionCutoff = context.metadata.nextOperationSequenceNumber() + 1;
context.metadata.getStreamSegmentId(segment1.getName(), true);
evictedSegments = evictSegments(evictionCutoff, context);
Assert.assertTrue("Expected segment was not evicted.", evictedSegments.contains(segment2.getId()));
// Repopulate the metadata.
val segment2Take2 = context.metadata.mapStreamSegmentId(segment2.getName(), segment2.getId());
val segment3Take2 = context.metadata.mapStreamSegmentId(segment3.getName(), segment3.getId());
segment2Take2.copyFrom(segment2);
segment3Take2.copyFrom(segment3);
// Add an append for each of the re-added segments and verify that the Writer re-requested the metadata, which
// indicates it had to recreate their SegmentAggregators.
HashSet<Long> requestedSegmentIds = new HashSet<>();
context.dataSource.setSegmentMetadataRequested(requestedSegmentIds::add);
context.dataSource.add(createAppend.apply(segment2Take2));
context.dataSource.add(createAppend.apply(segment3Take2));
metadataCheckpoint(context);
context.dataSource.waitFullyAcked().get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
Assert.assertTrue("The deleted segments did not have their metadata requested.", requestedSegmentIds.contains(segment2.getId()) && requestedSegmentIds.contains(segment3.getId()));
}
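The evictSegments and metadataCheckpoint helpers used above are not part of this excerpt. As a rough, hypothetical sketch (not the actual helper from StorageWriterTests), eviction can be expressed against the EvictableMetadata interface; the method names and signatures below are assumptions and may differ from the real test code:
private Collection<Long> evictSegments(long cutoffSeqNo, TestContext context) {
    // Assumption: the test metadata implements io.pravega.segmentstore.server.EvictableMetadata.
    EvictableMetadata metadata = (EvictableMetadata) context.metadata;
    Collection<SegmentMetadata> candidates = metadata.getEvictionCandidates(cutoffSeqNo, Integer.MAX_VALUE);
    metadata.cleanup(candidates, cutoffSeqNo);
    // Return only the ids, which is what testCleanup inspects.
    return candidates.stream().map(SegmentMetadata::getId).collect(Collectors.toList());
}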
use of io.pravega.segmentstore.server.UpdateableSegmentMetadata in project pravega by pravega.
the class StorageWriterTests method appendDataDepthFirst.
/**
* Appends data, depth-first, by filling up one segment before moving on to another.
*/
private void appendDataDepthFirst(Collection<Long> segmentIds, Function<Long, Integer> getAppendsPerSegment, HashMap<Long, ByteArrayOutputStream> segmentContents, TestContext context) {
int writeId = 0;
for (long segmentId : segmentIds) {
    UpdateableSegmentMetadata segmentMetadata = context.metadata.getStreamSegmentMetadata(segmentId);
    int appendCount = getAppendsPerSegment.apply(segmentId);
    for (int i = 0; i < appendCount; i++) {
        appendData(segmentMetadata, i, writeId, segmentContents, context);
        writeId++;
    }
}
}
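For illustration only, a hypothetical call site (segmentIds, segmentContents and context mirror the names used elsewhere in this test class):
// Write ten appends to every segment, filling each one before moving to the next.
HashMap<Long, ByteArrayOutputStream> segmentContents = new HashMap<>();
appendDataDepthFirst(segmentIds, id -> 10, segmentContents, context);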
use of io.pravega.segmentstore.server.UpdateableSegmentMetadata in project pravega by pravega.
the class StreamSegmentMapperTests method testGetStreamSegmentInfoWithConcurrency.
/**
* Tests GetStreamSegmentInfo when it is invoked in parallel with a Segment assignment.
*/
@Test
public void testGetStreamSegmentInfoWithConcurrency() throws Exception {
// A plain Segment suffices here, since assignment is driven by the same code for Transactions as well.
final String segmentName = "Segment";
final long segmentId = 1;
final SegmentProperties storageInfo = StreamSegmentInformation.builder().name(segmentName).length(123).sealed(true).build();
final long metadataLength = storageInfo.getLength() + 1;
HashSet<String> storageSegments = new HashSet<>();
storageSegments.add(segmentName);
@Cleanup TestContext context = new TestContext();
AtomicInteger storageGetCount = new AtomicInteger();
setupStorageGetHandler(context, storageSegments, sn -> {
storageGetCount.incrementAndGet();
return storageInfo;
});
setSavedState(segmentName, segmentId, 0L, ATTRIBUTE_COUNT, context);
val segmentState = context.stateStore.get(segmentName, TIMEOUT).join();
Map<UUID, Long> expectedAttributes = segmentState == null ? null : segmentState.getAttributes();
CompletableFuture<Void> addInvoked = new CompletableFuture<>();
context.operationLog.addHandler = op -> {
    addInvoked.join();
    // Need to set SegmentId on operation.
    StreamSegmentMapOperation sop = (StreamSegmentMapOperation) op;
    UpdateableSegmentMetadata segmentMetadata = context.metadata.mapStreamSegmentId(segmentName, segmentId);
    segmentMetadata.setStorageLength(sop.getLength());
    segmentMetadata.setLength(metadataLength);
    segmentMetadata.updateAttributes(expectedAttributes);
    if (sop.isSealed()) {
        segmentMetadata.markSealed();
    }
    return CompletableFuture.completedFuture(null);
};
// Second call is designed to hit when the first call still tries to assign the id, hence we test normal queueing.
context.mapper.getOrAssignStreamSegmentId(segmentName, TIMEOUT, id -> CompletableFuture.completedFuture(null));
// Concurrently with the map, request a Segment Info.
CompletableFuture<SegmentProperties> segmentInfoFuture = context.mapper.getStreamSegmentInfo(segmentName, TIMEOUT);
Assert.assertFalse("getSegmentInfo returned a completed future.", segmentInfoFuture.isDone());
// Release the OperationLog add and verify the Segment Info has been served with information from the Metadata.
addInvoked.complete(null);
SegmentProperties segmentInfo = segmentInfoFuture.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
val expectedInfo = context.metadata.getStreamSegmentMetadata(segmentId);
assertEquals("Unexpected Segment Info returned.", expectedInfo, segmentInfo);
SegmentMetadataComparer.assertSameAttributes("Unexpected attributes returned.", expectedInfo.getAttributes(), segmentInfo);
}
use of io.pravega.segmentstore.server.UpdateableSegmentMetadata in project pravega by pravega.
the class StreamSegmentMapperTests method setupOperationLog.
private void setupOperationLog(TestContext context) {
AtomicLong seqNo = new AtomicLong();
context.operationLog.addHandler = op -> {
    long currentSeqNo = seqNo.incrementAndGet();
    UpdateableSegmentMetadata sm;
    Assert.assertTrue("Unexpected operation type.", op instanceof StreamSegmentMapOperation);
    StreamSegmentMapOperation mop = (StreamSegmentMapOperation) op;
    if (mop.getStreamSegmentId() == ContainerMetadata.NO_STREAM_SEGMENT_ID) {
        mop.setStreamSegmentId(currentSeqNo);
    }
    if (mop.isTransaction()) {
        sm = context.metadata.mapStreamSegmentId(mop.getStreamSegmentName(), mop.getStreamSegmentId(), mop.getParentStreamSegmentId());
    } else {
        sm = context.metadata.mapStreamSegmentId(mop.getStreamSegmentName(), mop.getStreamSegmentId());
    }
    sm.setStorageLength(0);
    sm.setLength(mop.getLength());
    sm.setStartOffset(mop.getStartOffset());
    if (mop.isSealed()) {
        sm.markSealed();
    }
    sm.updateAttributes(mop.getAttributes());
    return CompletableFuture.completedFuture(null);
};
}
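A hypothetical call site, mirroring the mapper invocation shown in testGetStreamSegmentInfoWithConcurrency above; the segment name is illustrative:
setupOperationLog(context);
// The stubbed OperationLog now assigns ids and registers metadata for every StreamSegmentMapOperation it receives.
context.mapper.getOrAssignStreamSegmentId("Segment", TIMEOUT, id -> CompletableFuture.completedFuture(null));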
use of io.pravega.segmentstore.server.UpdateableSegmentMetadata in project pravega by pravega.
the class ContainerMetadataUpdateTransactionTests method createMetadata.
private UpdateableContainerMetadata createMetadata() {
UpdateableContainerMetadata metadata = createBlankMetadata();
UpdateableSegmentMetadata segmentMetadata = metadata.mapStreamSegmentId(SEGMENT_NAME, SEGMENT_ID);
segmentMetadata.setLength(SEGMENT_LENGTH);
// Different from Length.
segmentMetadata.setStorageLength(SEGMENT_LENGTH - 1);
segmentMetadata = metadata.mapStreamSegmentId(SEALED_TRANSACTION_NAME, SEALED_TRANSACTION_ID, SEGMENT_ID);
segmentMetadata.setLength(SEALED_TRANSACTION_LENGTH);
segmentMetadata.setStorageLength(SEALED_TRANSACTION_LENGTH);
segmentMetadata.markSealed();
segmentMetadata = metadata.mapStreamSegmentId(NOTSEALED_TRANSACTION_NAME, NOTSEALED_TRANSACTION_ID, SEGMENT_ID);
segmentMetadata.setLength(0);
segmentMetadata.setStorageLength(0);
return metadata;
}
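For illustration, a hypothetical sanity check of the fixture (not part of the original test class); constant names mirror those used in createMetadata:
UpdateableContainerMetadata metadata = createMetadata();
UpdateableSegmentMetadata mainSegment = metadata.getStreamSegmentMetadata(SEGMENT_ID);
// Length and StorageLength were deliberately set one byte apart above.
Assert.assertEquals(SEGMENT_LENGTH, mainSegment.getLength());
Assert.assertEquals(SEGMENT_LENGTH - 1, mainSegment.getStorageLength());
Assert.assertTrue(metadata.getStreamSegmentMetadata(SEALED_TRANSACTION_ID).isSealed());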