Use of io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation in project pravega by pravega.
The class DataRecoveryTest, method testRepairLogEditOperationUserInput.
@Test
public void testRepairLogEditOperationUserInput() throws IOException {
// Setup command object.
STATE.set(new AdminCommandState());
Properties pravegaProperties = new Properties();
pravegaProperties.setProperty("pravegaservice.container.count", "1");
pravegaProperties.setProperty("pravegaservice.clusterName", "pravega0");
STATE.get().getConfigBuilder().include(pravegaProperties);
CommandArgs args = new CommandArgs(List.of("0"), STATE.get());
DurableDataLogRepairCommand command = Mockito.spy(new DurableDataLogRepairCommand(args));
// Case 1: Input a Delete Edit Operation with wrong initial/final ids. Then retry with correct ids.
Mockito.doReturn(true).doReturn(false).when(command).confirmContinue();
Mockito.doReturn(1L).doReturn(1L).doReturn(1L).doReturn(2L).when(command).getLongUserInput(Mockito.any());
Mockito.doReturn("delete").when(command).getStringUserInput(Mockito.any());
Assert.assertEquals(List.of(new DurableDataLogRepairCommand.LogEditOperation(
        DurableDataLogRepairCommand.LogEditType.DELETE_OPERATION, 1, 2, null)),
        command.getDurableLogEditsFromUser());
// Case 2: Input an Add Edit Operation with a wrong operation type. Then retry with correct operation type.
Mockito.doReturn(true).doReturn(true).doReturn(false).when(command).confirmContinue();
Mockito.doReturn(1L).doReturn(1L).when(command).getLongUserInput(Mockito.any());
Mockito.doReturn("add").doReturn("wrong").doReturn("add").doReturn("DeleteSegmentOperation").when(command).getStringUserInput(Mockito.any());
DeleteSegmentOperation deleteOperationAdded = new DeleteSegmentOperation(1);
List<DurableDataLogRepairCommand.LogEditOperation> editOps = new ArrayList<>();
editOps.add(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 1, deleteOperationAdded));
editOps.add(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 1, deleteOperationAdded));
Assert.assertEquals(editOps, command.getDurableLogEditsFromUser());
// Case 3: Create rest of operation types without payload (MergeSegmentOperation, StreamSegmentMapOperation, StreamSegmentTruncateOperation, UpdateAttributesOperation)
long timestamp = System.currentTimeMillis();
UUID uuid = UUID.randomUUID();
editOps.clear();
Mockito.doReturn(true).doReturn(false).doReturn(false).doReturn(true).doReturn(true)
        .doReturn(false).doReturn(false).doReturn(true).doReturn(false).doReturn(true)
        .doReturn(true).doReturn(false).doReturn(false)
        .when(command).confirmContinue();
Mockito.doReturn(1L).doReturn(1L).doReturn(2L).doReturn(1L).doReturn(2L).doReturn(123L)
        .doReturn(2L).doReturn(2L).doReturn(3L).doReturn(1L).doReturn(10L).doReturn(timestamp)
        .doReturn(3L).doReturn(3L).doReturn(4L).doReturn(4L).doReturn(3L).doReturn(1L).doReturn(2L)
        .when(command).getLongUserInput(Mockito.any());
Mockito.doReturn("add").doReturn("MergeSegmentOperation").doReturn(uuid.toString())
        .doReturn("add").doReturn("StreamSegmentMapOperation").doReturn("test").doReturn(uuid.toString())
        .doReturn("add").doReturn("StreamSegmentTruncateOperation")
        .doReturn("add").doReturn("UpdateAttributesOperation").doReturn(uuid.toString())
        .when(command).getStringUserInput(Mockito.any());
Mockito.doReturn((int) AttributeUpdateType.Replace.getTypeId()).when(command).getIntUserInput(Mockito.any());
Mockito.doReturn(true).doReturn(true).doReturn(false).doReturn(false).when(command).getBooleanUserInput(Mockito.any());
AttributeUpdateCollection attributeUpdates = new AttributeUpdateCollection();
attributeUpdates.add(new AttributeUpdate(AttributeId.fromUUID(uuid), AttributeUpdateType.Replace, 1, 2));
MergeSegmentOperation mergeSegmentOperation = new MergeSegmentOperation(1, 2, attributeUpdates);
mergeSegmentOperation.setStreamSegmentOffset(123);
editOps.add(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 1, mergeSegmentOperation));
Map<AttributeId, Long> attributes = new HashMap<>();
attributes.put(AttributeId.fromUUID(uuid), 10L);
SegmentProperties segmentProperties = StreamSegmentInformation.builder()
        .name("test")
        .startOffset(2)
        .length(3)
        .storageLength(1)
        .sealed(true)
        .deleted(false)
        .sealedInStorage(true)
        .deletedInStorage(false)
        .attributes(attributes)
        .lastModified(new ImmutableDate(timestamp))
        .build();
editOps.add(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 2, 2, new StreamSegmentMapOperation(segmentProperties)));
editOps.add(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 3, 3, new StreamSegmentTruncateOperation(3, 3)));
editOps.add(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 4, 4, new UpdateAttributesOperation(4, attributeUpdates)));
Assert.assertEquals(editOps, command.getDurableLogEditsFromUser());
// Case 4: Add wrong inputs.
Mockito.doReturn(true).doReturn(true).doReturn(false).when(command).confirmContinue();
Mockito.doThrow(NumberFormatException.class).doThrow(NullPointerException.class).when(command).getLongUserInput(Mockito.any());
Mockito.doReturn("wrong").doReturn("replace").doReturn("replace").when(command).getStringUserInput(Mockito.any());
command.getDurableLogEditsFromUser();
}
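The long doReturn(...) chains above feed the mocked prompts one value per invocation, in order, with the last stubbed value repeating for any further calls. A minimal standalone sketch of this Mockito pattern (the Reader interface is hypothetical, not part of Pravega):
import org.mockito.Mockito;

public class ChainedStubSketch {
    interface Reader {
        long readLong();
    }

    public static void main(String[] args) {
        Reader reader = Mockito.mock(Reader.class);
        // Each doReturn supplies the value for one successive call.
        Mockito.doReturn(1L).doReturn(2L).doReturn(3L).when(reader).readLong();
        System.out.println(reader.readLong()); // 1
        System.out.println(reader.readLong()); // 2
        System.out.println(reader.readLong()); // 3
        System.out.println(reader.readLong()); // 3 (the last stubbed value repeats)
    }
}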
Use of io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation in project pravega by pravega.
The class StreamSegmentContainer, method mergeStreamSegment.
private CompletableFuture<MergeStreamSegmentResult> mergeStreamSegment(long targetSegmentId, long sourceSegmentId, AttributeUpdateCollection attributeUpdates, TimeoutTimer timer) {
// Get a reference to the source segment's metadata now, before the merge. It may not be accessible afterwards.
SegmentMetadata sourceMetadata = this.metadata.getStreamSegmentMetadata(sourceSegmentId);
CompletableFuture<Void> sealResult = trySealStreamSegment(sourceMetadata, timer.getRemaining());
if (sourceMetadata.getLength() == 0) {
// The source appears to be empty, but it cannot be considered truly empty until it is sealed, as it may still have outstanding
// writes in the pipeline. As such, we cannot pipeline the two operations, and must wait for the seal to finish first.
return sealResult.thenComposeAsync(v -> {
// The seal is now complete. The metadata is guaranteed to have been updated with every operation up
// to and including the seal, so if there were any writes outstanding before, they should now be reflected in it.
if (sourceMetadata.getLength() == 0) {
// Source is still empty after sealing - OK to delete.
log.debug("{}: Updating attributes (if any) and deleting empty source segment instead of merging {}.", this.traceObjectId, sourceMetadata.getName());
// Execute the attribute update on the target segment only if needed.
Supplier<CompletableFuture<Void>> updateAttributesIfNeeded = () -> attributeUpdates == null
        ? CompletableFuture.completedFuture(null)
        : updateAttributesForSegment(targetSegmentId, attributeUpdates, timer.getRemaining());
return updateAttributesIfNeeded.get().thenCompose(v2 ->
        deleteStreamSegment(sourceMetadata.getName(), timer.getRemaining()).thenApply(v3 ->
                new MergeStreamSegmentResult(this.metadata.getStreamSegmentMetadata(targetSegmentId).getLength(),
                        sourceMetadata.getLength(), sourceMetadata.getAttributes())));
} else {
// Source now has some data - we must merge the two.
MergeSegmentOperation operation = new MergeSegmentOperation(targetSegmentId, sourceSegmentId, attributeUpdates);
return processAttributeUpdaterOperation(operation, timer).thenApply(v2 ->
        new MergeStreamSegmentResult(operation.getStreamSegmentOffset() + operation.getLength(),
                operation.getLength(), sourceMetadata.getAttributes()));
}
}, this.executor);
} else {
// Source is not empty, so we cannot delete. Make use of the DurableLog's pipelining abilities by queueing up
// the Merge right after the Seal.
MergeSegmentOperation operation = new MergeSegmentOperation(targetSegmentId, sourceSegmentId, attributeUpdates);
return CompletableFuture.allOf(sealResult, processAttributeUpdaterOperation(operation, timer)).thenApply(v2 ->
        new MergeStreamSegmentResult(operation.getStreamSegmentOffset() + operation.getLength(),
                operation.getLength(), sourceMetadata.getAttributes()));
}
}
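On both merge paths above, the target's new length in the MergeStreamSegmentResult is computed from the operation itself as getStreamSegmentOffset() + getLength(), i.e. the end of the range that the source occupies in the target after the merge. A minimal sketch of that arithmetic with hypothetical numbers:
public class MergeResultArithmeticSketch {
    public static void main(String[] args) {
        long mergeOffset = 1000; // operation.getStreamSegmentOffset(): where the source's data begins in the target
        long mergeLength = 250;  // operation.getLength(): number of bytes merged in from the source
        // The source occupies [1000, 1250) in the target, so the target's length after the merge is 1250.
        long targetLengthAfterMerge = mergeOffset + mergeLength;
        System.out.println(targetLengthAfterMerge); // 1250
    }
}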
Use of io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation in project pravega by pravega.
The class MemoryStateUpdater, method addToReadIndex.
/**
* Registers the given operation in the ReadIndex.
*
* @param operation The operation to register.
* @throws CacheFullException If the operation could not be added to the {@link ReadIndex} due to the cache being
* full and unable to evict anything to make room for more.
* @throws ServiceHaltException If any unexpected exception occurred that prevented the operation from being
* added to the {@link ReadIndex}. Unexpected exceptions are all exceptions other than those declared in this
* method or that indicate we are shutting down or that the segment has been deleted.
*/
private void addToReadIndex(StorageOperation operation) throws ServiceHaltException, CacheFullException {
try {
if (operation instanceof StreamSegmentAppendOperation) {
// Record a StreamSegmentAppendOperation. We support this type of operation just in case, but it deserves a
// warning: handling it here means we do not optimize memory properly, and we end up storing the data
// in two different places.
StreamSegmentAppendOperation appendOperation = (StreamSegmentAppendOperation) operation;
this.readIndex.append(appendOperation.getStreamSegmentId(), appendOperation.getStreamSegmentOffset(), appendOperation.getData());
} else if (operation instanceof MergeSegmentOperation) {
// Record a MergeSegmentOperation. We call beginMerge here, and the StorageWriter will call completeMerge.
MergeSegmentOperation mergeOperation = (MergeSegmentOperation) operation;
this.readIndex.beginMerge(mergeOperation.getStreamSegmentId(), mergeOperation.getStreamSegmentOffset(), mergeOperation.getSourceSegmentId());
} else {
assert !(operation instanceof CachedStreamSegmentAppendOperation) : "attempted to add a CachedStreamSegmentAppendOperation to the ReadIndex";
}
} catch (ObjectClosedException | StreamSegmentNotExistsException ex) {
// The Segment is in the process of being deleted. We usually end up in here because a concurrent delete
// request has updated the metadata while we were executing.
log.warn("Not adding operation '{}' to ReadIndex because it refers to a deleted StreamSegment.", operation);
} catch (CacheFullException ex) {
// Record the operation that we couldn't add and re-throw the exception as we cannot do anything about it here.
log.warn("Not adding operation '{}' to ReadIndex because the Cache is full.", operation);
throw ex;
} catch (Exception ex) {
throw new ServiceHaltException(String.format("Unable to add operation '%s' to ReadIndex.", operation), ex);
}
}
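As the comment on the MergeSegmentOperation branch notes, beginMerge is only the first half of a two-phase protocol; the StorageWriter later finalizes the merge with completeMerge. A standalone sketch of that shape (the class and map below are illustrative, not the real ReadIndex):
import java.util.HashMap;
import java.util.Map;

public class TwoPhaseMergeSketch {
    // sourceSegmentId -> offset in the target at which the source's data begins.
    private final Map<Long, Long> pendingMerges = new HashMap<>();

    void beginMerge(long targetId, long offsetInTarget, long sourceId) {
        // Phase 1 (here, at ingestion time): reads over the merged range on the target
        // are redirected to the source segment's data.
        this.pendingMerges.put(sourceId, offsetInTarget);
    }

    void completeMerge(long targetId, long sourceId) {
        // Phase 2 (later, once the StorageWriter has made the merge durable in Storage):
        // the redirection is dropped and the target serves the range itself.
        this.pendingMerges.remove(sourceId);
    }
}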
Use of io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation in project pravega by pravega.
The class ContainerMetadataUpdateTransaction, method preProcessOperation.
/**
* Pre-processes the given Operation. See OperationMetadataUpdater.preProcessOperation for more details on behavior.
*
* @param operation The operation to pre-process.
* @throws ContainerException If the given operation was rejected given the current state of the container metadata.
* @throws StreamSegmentException If the given operation was incompatible with the current state of the Segment.
* For example: StreamSegmentNotExistsException, StreamSegmentSealedException or
* StreamSegmentMergedException.
*/
void preProcessOperation(Operation operation) throws ContainerException, StreamSegmentException {
checkNotSealed();
if (operation instanceof SegmentOperation) {
val segmentMetadata = getSegmentUpdateTransaction(((SegmentOperation) operation).getStreamSegmentId());
if (segmentMetadata.isDeleted()) {
throw new StreamSegmentNotExistsException(segmentMetadata.getName());
}
if (operation instanceof StreamSegmentAppendOperation) {
segmentMetadata.preProcessOperation((StreamSegmentAppendOperation) operation);
} else if (operation instanceof StreamSegmentSealOperation) {
segmentMetadata.preProcessOperation((StreamSegmentSealOperation) operation);
} else if (operation instanceof MergeSegmentOperation) {
MergeSegmentOperation mbe = (MergeSegmentOperation) operation;
SegmentMetadataUpdateTransaction sourceMetadata = getSegmentUpdateTransaction(mbe.getSourceSegmentId());
sourceMetadata.preProcessAsSourceSegment(mbe);
segmentMetadata.preProcessAsTargetSegment(mbe, sourceMetadata);
} else if (operation instanceof UpdateAttributesOperation) {
segmentMetadata.preProcessOperation((UpdateAttributesOperation) operation);
} else if (operation instanceof StreamSegmentTruncateOperation) {
segmentMetadata.preProcessOperation((StreamSegmentTruncateOperation) operation);
} else if (operation instanceof DeleteSegmentOperation) {
segmentMetadata.preProcessOperation((DeleteSegmentOperation) operation);
}
}
if (operation instanceof MetadataCheckpointOperation) {
// MetadataCheckpointOperations do not require preProcess and accept; they can be handled in a single stage.
processMetadataOperation((MetadataCheckpointOperation) operation);
} else if (operation instanceof StorageMetadataCheckpointOperation) {
// StorageMetadataCheckpointOperations do not require preProcess and accept; they can be handled in a single stage.
processMetadataOperation((StorageMetadataCheckpointOperation) operation);
} else if (operation instanceof StreamSegmentMapOperation) {
preProcessMetadataOperation((StreamSegmentMapOperation) operation);
}
}
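For the MergeSegmentOperation branch, note that validation is two-sided: the source segment is pre-processed first, then the target is validated against it. A standalone sketch of the kind of checks involved (rules paraphrased for illustration; not the exact Pravega validation):
public class MergePreProcessSketch {
    static class SegmentState {
        boolean sealed;
        boolean deleted;
        boolean merged;
    }

    static void preProcessMerge(SegmentState source, SegmentState target) {
        if (source.deleted || target.deleted) {
            throw new IllegalStateException("segment does not exist");
        }
        if (source.merged) {
            throw new IllegalStateException("source has already been merged");
        }
        if (!source.sealed) {
            throw new IllegalStateException("source must be sealed before it can be merged");
        }
        if (target.sealed) {
            throw new IllegalStateException("cannot merge into a sealed target");
        }
    }
}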
Use of io.pravega.segmentstore.server.logs.operations.MergeSegmentOperation in project pravega by pravega.
The class SegmentAggregator, method mergeCompleted.
/**
* Executes post-Storage merge tasks, including state and metadata updates.
*/
private void mergeCompleted(SegmentProperties segmentProperties, UpdateableSegmentMetadata transactionMetadata, MergeSegmentOperation mergeOp) {
// We have just processed a MergeSegmentOperation; pop the first outstanding operation off and decrement the counter.
StorageOperation processedOperation = this.operations.removeFirst();
assert processedOperation != null && processedOperation instanceof MergeSegmentOperation : "First outstanding operation was not a MergeSegmentOperation";
MergeSegmentOperation mop = (MergeSegmentOperation) processedOperation;
assert mop.getSourceSegmentId() == transactionMetadata.getId() : "First outstanding operation was a MergeSegmentOperation for the wrong Transaction id.";
int newCount = this.mergeTransactionCount.decrementAndGet();
assert newCount >= 0 : "Negative value for mergeTransactionCount";
// Post-merger validation. Verify we are still in agreement with the storage.
long expectedNewLength = this.metadata.getStorageLength() + mergeOp.getLength();
if (segmentProperties.getLength() != expectedNewLength) {
throw new CompletionException(new DataCorruptionException(String.format(
        "Transaction Segment '%s' was merged into parent '%s' but the parent segment has an unexpected StorageLength after the merger. Previous=%d, MergeLength=%d, Expected=%d, Actual=%d",
        transactionMetadata.getName(), this.metadata.getName(), this.metadata.getStorageLength(),
        mergeOp.getLength(), expectedNewLength, segmentProperties.getLength())));
}
updateMetadata(segmentProperties);
updateMetadataForTransactionPostMerger(transactionMetadata, mop.getStreamSegmentId());
}
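The post-merger validation reduces to checking that Storage grew by exactly the merged byte count. A sketch of that check with hypothetical numbers:
public class PostMergeValidationSketch {
    public static void main(String[] args) {
        long storageLengthBeforeMerge = 4096; // this.metadata.getStorageLength()
        long mergedBytes = 1024;              // mergeOp.getLength()
        long expectedNewLength = storageLengthBeforeMerge + mergedBytes; // 5120
        long actualStorageLength = 5120;      // segmentProperties.getLength(), as reported by Storage
        if (actualStorageLength != expectedNewLength) {
            throw new IllegalStateException("StorageLength mismatch after merge: possible data corruption");
        }
        System.out.println("Post-merge validation passed: length=" + actualStorageLength);
    }
}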