Usage of io.pravega.segmentstore.contracts.MergeStreamSegmentResult in the pravega project (pravega/pravega):
class PravegaRequestProcessorTest, method testMetricsOnSegmentMerge.
@Test(timeout = 20000)
public void testMetricsOnSegmentMerge() throws Exception {
    // Verifies that a successful transaction-segment merge is reported to the SegmentStatsRecorder.
    String parentSegment = "scope/stream/txnSegment";
    UUID transactionId = UUID.randomUUID();
    @Cleanup
    ServiceBuilder serviceBuilder = newInlineExecutionInMemoryBuilder(getBuilderConfig());
    serviceBuilder.initialize();
    StreamSegmentStore store = spy(serviceBuilder.createStreamSegmentService());
    ServerConnection connection = mock(ServerConnection.class);
    // Stub the seal to report "already merged" so the processor proceeds with the merge path.
    doReturn(Futures.failedFuture(new StreamSegmentMergedException(parentSegment)))
            .when(store).sealStreamSegment(anyString(), any());

    // Stub the merge itself to complete successfully with a canned result.
    CompletableFuture<MergeStreamSegmentResult> mergeFuture =
            CompletableFuture.completedFuture(createMergeStreamSegmentResult(parentSegment, transactionId));
    doReturn(mergeFuture).when(store).mergeStreamSegment(anyString(), anyString(), any(), any());

    SegmentStatsRecorder statsRecorder = mock(SegmentStatsRecorder.class);
    PravegaRequestProcessor processor = new PravegaRequestProcessor(store, mock(TableStore.class),
            new TrackedConnection(connection), statsRecorder, TableSegmentStatsRecorder.noOp(),
            new PassingTokenVerifier(), false);

    // Create the parent segment, then the transaction segment, then merge the latter into the former.
    processor.createSegment(new WireCommands.CreateSegment(0, parentSegment, WireCommands.CreateSegment.NO_SCALE, 0, "", 0));
    String txnSegmentName = NameUtils.getTransactionNameFromId(parentSegment, transactionId);
    processor.createSegment(new WireCommands.CreateSegment(1, txnSegmentName, WireCommands.CreateSegment.NO_SCALE, 0, "", 0));
    processor.mergeSegments(new WireCommands.MergeSegments(2, parentSegment, txnSegmentName, ""));

    // The recorder must be told about the merge exactly once, with the values from the canned result.
    verify(statsRecorder).merge(parentSegment, 100L, 10, parentSegment.hashCode());
}
Usage of io.pravega.segmentstore.contracts.MergeStreamSegmentResult in the pravega project (pravega/pravega):
class StreamSegmentContainer, private method mergeStreamSegment.
/**
 * Merges the Source segment into the Target segment, given their already-assigned segment ids.
 * If the Source is empty after sealing, it is deleted instead of merged (any attribute updates are
 * still applied to the Target); otherwise a MergeSegmentOperation is queued. Returns a
 * {@link MergeStreamSegmentResult} describing the Target's resulting length, the merged byte count,
 * and the Source's attributes.
 *
 * NOTE(review): the statement ordering below is deliberate (seal-before-length-check, and
 * seal pipelined with merge in the non-empty case) — do not reorder.
 *
 * @param targetSegmentId  Id of the segment to merge into.
 * @param sourceSegmentId  Id of the (transaction) segment being merged.
 * @param attributeUpdates Optional attribute updates to apply to the Target; may be null.
 * @param timer            Tracks the remaining time for the whole operation.
 */
private CompletableFuture<MergeStreamSegmentResult> mergeStreamSegment(long targetSegmentId, long sourceSegmentId, AttributeUpdateCollection attributeUpdates, TimeoutTimer timer) {
    // Get a reference to the source segment's metadata now, before the merge. It may not be accessible afterwards.
    SegmentMetadata sourceMetadata = this.metadata.getStreamSegmentMetadata(sourceSegmentId);
    CompletableFuture<Void> sealResult = trySealStreamSegment(sourceMetadata, timer.getRemaining());
    if (sourceMetadata.getLength() == 0) {
        // Source appears empty, but that length may not yet reflect in-flight
        // writes in the pipeline. As such, we cannot pipeline the two operations, and must wait for the seal to finish first.
        return sealResult.thenComposeAsync(v -> {
            // At this point the seal is complete; the metadata length accounts for every write up
            // to and including the seal, so if there were any writes outstanding before, they should now be reflected in it.
            if (sourceMetadata.getLength() == 0) {
                // Source is still empty after sealing - OK to delete.
                log.debug("{}: Updating attributes (if any) and deleting empty source segment instead of merging {}.", this.traceObjectId, sourceMetadata.getName());
                // Execute the attribute update on the target segment only if needed.
                Supplier<CompletableFuture<Void>> updateAttributesIfNeeded = () -> attributeUpdates == null ? CompletableFuture.completedFuture(null) : updateAttributesForSegment(targetSegmentId, attributeUpdates, timer.getRemaining());
                // Delete the empty source and synthesize the result: target length unchanged by the
                // merge, merged length taken from the (empty) source's metadata.
                return updateAttributesIfNeeded.get().thenCompose(v2 -> deleteStreamSegment(sourceMetadata.getName(), timer.getRemaining()).thenApply(v3 -> new MergeStreamSegmentResult(this.metadata.getStreamSegmentMetadata(targetSegmentId).getLength(), sourceMetadata.getLength(), sourceMetadata.getAttributes())));
            } else {
                // Source now has some data - we must merge the two.
                MergeSegmentOperation operation = new MergeSegmentOperation(targetSegmentId, sourceSegmentId, attributeUpdates);
                // Result's target length = merge offset + merged byte count, both taken from the operation.
                return processAttributeUpdaterOperation(operation, timer).thenApply(v2 -> new MergeStreamSegmentResult(operation.getStreamSegmentOffset() + operation.getLength(), operation.getLength(), sourceMetadata.getAttributes()));
            }
        }, this.executor);
    } else {
        // Source is not empty, so we cannot delete. Make use of the DurableLog's pipelining abilities by queueing up
        // the Merge right after the Seal.
        MergeSegmentOperation operation = new MergeSegmentOperation(targetSegmentId, sourceSegmentId, attributeUpdates);
        return CompletableFuture.allOf(sealResult, processAttributeUpdaterOperation(operation, timer)).thenApply(v2 -> new MergeStreamSegmentResult(operation.getStreamSegmentOffset() + operation.getLength(), operation.getLength(), sourceMetadata.getAttributes()));
    }
}
Usage of io.pravega.segmentstore.contracts.MergeStreamSegmentResult in the pravega project (pravega/pravega):
class StreamSegmentContainer, public method mergeStreamSegment.
@Override
public CompletableFuture<MergeStreamSegmentResult> mergeStreamSegment(String targetStreamSegment, String sourceStreamSegment, AttributeUpdateCollection attributes, Duration timeout) {
    ensureRunning();
    logRequest("mergeStreamSegment", targetStreamSegment, sourceStreamSegment);
    this.metrics.mergeSegment();
    TimeoutTimer timer = new TimeoutTimer(timeout);

    // Resolve both segment ids (target first, then source) and perform the merge, then run a
    // cleanup phase; any exception is still propagated to the caller.
    return this.metadataStore
            .getOrAssignSegmentId(targetStreamSegment, timer.getRemaining(),
                    targetSegmentId -> this.metadataStore.getOrAssignSegmentId(sourceStreamSegment, timer.getRemaining(),
                            sourceSegmentId -> mergeStreamSegment(targetSegmentId, sourceSegmentId, attributes, timer)))
            .handleAsync((mergeResult, ex) -> {
                boolean alreadyMerged = ex != null && Exceptions.unwrap(ex) instanceof StreamSegmentMergedException;
                if (ex == null || alreadyMerged) {
                    // Merge succeeded (or had already happened): clear the source's SegmentInfo.
                    // This is fire-and-forget; we do not wait for it.
                    this.metadataStore.clearSegmentInfo(sourceStreamSegment, timer.getRemaining());
                }
                if (ex != null) {
                    // Surface the failure to the caller.
                    throw new CompletionException(ex);
                }
                return mergeResult;
            }, this.executor);
}
Aggregations