use of io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation in project pravega by pravega.
In class StreamSegmentMapperTests, method testGetStreamSegmentInfoWithConcurrency:
/**
 * Tests GetStreamSegmentInfo when it is invoked in parallel with a Segment assignment.
 */
@Test
public void testGetStreamSegmentInfoWithConcurrency() throws Exception {
    // Segment assignment is driven by the same code for Transactions as well, so only a plain Segment is tested here.
    final String segmentName = "Segment";
    final long segmentId = 1;
    final SegmentProperties storageInfo = StreamSegmentInformation.builder().name(segmentName).length(123).sealed(true).build();
    final long metadataLength = storageInfo.getLength() + 1;
    HashSet<String> storageSegments = new HashSet<>();
    storageSegments.add(segmentName);
    @Cleanup TestContext context = new TestContext();
    AtomicInteger storageGetCount = new AtomicInteger();
    setupStorageGetHandler(context, storageSegments, sn -> {
        storageGetCount.incrementAndGet();
        return storageInfo;
    });
    setSavedState(segmentName, segmentId, 0L, ATTRIBUTE_COUNT, context);
    val segmentState = context.stateStore.get(segmentName, TIMEOUT).join();
    Map<UUID, Long> expectedAttributes = segmentState == null ? null : segmentState.getAttributes();
    CompletableFuture<Void> addInvoked = new CompletableFuture<>();
    context.operationLog.addHandler = op -> {
        addInvoked.join();
        // Need to set SegmentId on the operation.
        StreamSegmentMapOperation sop = (StreamSegmentMapOperation) op;
        UpdateableSegmentMetadata segmentMetadata = context.metadata.mapStreamSegmentId(segmentName, segmentId);
        segmentMetadata.setStorageLength(sop.getLength());
        segmentMetadata.setLength(metadataLength);
        segmentMetadata.updateAttributes(expectedAttributes);
        if (sop.isSealed()) {
            segmentMetadata.markSealed();
        }
        return CompletableFuture.completedFuture(null);
    };
    // The next call is designed to hit while this one is still trying to assign the id, hence we test normal queueing.
    context.mapper.getOrAssignStreamSegmentId(segmentName, TIMEOUT, id -> CompletableFuture.completedFuture(null));
    // Concurrently with the map, request a Segment Info.
    CompletableFuture<SegmentProperties> segmentInfoFuture = context.mapper.getStreamSegmentInfo(segmentName, TIMEOUT);
    Assert.assertFalse("getSegmentInfo returned a completed future.", segmentInfoFuture.isDone());
    // Release the OperationLog add and verify the Segment Info has been served with information from the Metadata.
    addInvoked.complete(null);
    SegmentProperties segmentInfo = segmentInfoFuture.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    val expectedInfo = context.metadata.getStreamSegmentMetadata(segmentId);
    assertEquals("Unexpected Segment Info returned.", expectedInfo, segmentInfo);
    SegmentMetadataComparer.assertSameAttributes("Unexpected attributes returned.", expectedInfo.getAttributes(), segmentInfo);
}
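The synchronization device in this test is the addInvoked future: the OperationLog handler blocks on it, letting the test assert that getStreamSegmentInfo is still pending until the assignment is released. A minimal, self-contained sketch of that gating pattern using only the JDK (all names here are illustrative, not Pravega API):

import java.util.concurrent.CompletableFuture;

public class GatedHandlerSketch {
    public static void main(String[] args) {
        // The "gate" plays the role of addInvoked in the test above.
        CompletableFuture<Void> gate = new CompletableFuture<>();
        // Simulates the blocked OperationLog add: no result until the gate opens.
        CompletableFuture<String> result = CompletableFuture.supplyAsync(() -> {
            gate.join();
            return "segment-info";
        });
        System.out.println("done before release? " + result.isDone()); // false
        gate.complete(null); // release, as addInvoked.complete(null) does above
        System.out.println("result: " + result.join());
    }
}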
use of io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation in project pravega by pravega.
In class StreamSegmentMapperTests, method setupOperationLog:
private void setupOperationLog(TestContext context) {
    AtomicLong seqNo = new AtomicLong();
    context.operationLog.addHandler = op -> {
        long currentSeqNo = seqNo.incrementAndGet();
        UpdateableSegmentMetadata sm;
        Assert.assertTrue("Unexpected operation type.", op instanceof StreamSegmentMapOperation);
        StreamSegmentMapOperation mop = (StreamSegmentMapOperation) op;
        if (mop.getStreamSegmentId() == ContainerMetadata.NO_STREAM_SEGMENT_ID) {
            mop.setStreamSegmentId(currentSeqNo);
        }
        if (mop.isTransaction()) {
            sm = context.metadata.mapStreamSegmentId(mop.getStreamSegmentName(), mop.getStreamSegmentId(), mop.getParentStreamSegmentId());
        } else {
            sm = context.metadata.mapStreamSegmentId(mop.getStreamSegmentName(), mop.getStreamSegmentId());
        }
        sm.setStorageLength(0);
        sm.setLength(mop.getLength());
        sm.setStartOffset(mop.getStartOffset());
        if (mop.isSealed()) {
            sm.markSealed();
        }
        sm.updateAttributes(mop.getAttributes());
        return CompletableFuture.completedFuture(null);
    };
}
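The handler above doubles as an id assigner: a fresh id is taken from a monotonic counter only when the incoming StreamSegmentMapOperation does not already carry one. A minimal sketch of just that rule (plain JDK; NO_ID is a stand-in for ContainerMetadata.NO_STREAM_SEGMENT_ID):

import java.util.concurrent.atomic.AtomicLong;

public class IdAssignerSketch {
    // Stand-in for ContainerMetadata.NO_STREAM_SEGMENT_ID.
    static final long NO_ID = Long.MIN_VALUE;

    private final AtomicLong seqNo = new AtomicLong();

    // Assign a fresh id only if the operation does not carry one yet.
    long assignIfMissing(long currentId) {
        return currentId == NO_ID ? seqNo.incrementAndGet() : currentId;
    }

    public static void main(String[] args) {
        IdAssignerSketch assigner = new IdAssignerSketch();
        System.out.println(assigner.assignIfMissing(NO_ID)); // 1: freshly assigned
        System.out.println(assigner.assignIfMissing(42));    // 42: already mapped
    }
}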
use of io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation in project pravega by pravega.
In class DataRecoveryTest, method testRepairLogEditOperationUserInput:
@Test
public void testRepairLogEditOperationUserInput() throws IOException {
    // Setup the command object.
    STATE.set(new AdminCommandState());
    Properties pravegaProperties = new Properties();
    pravegaProperties.setProperty("pravegaservice.container.count", "1");
    pravegaProperties.setProperty("pravegaservice.clusterName", "pravega0");
    STATE.get().getConfigBuilder().include(pravegaProperties);
    CommandArgs args = new CommandArgs(List.of("0"), STATE.get());
    DurableDataLogRepairCommand command = Mockito.spy(new DurableDataLogRepairCommand(args));
    // Case 1: Input a Delete Edit Operation with wrong initial/final ids. Then retry with correct ids.
    Mockito.doReturn(true).doReturn(false).when(command).confirmContinue();
    Mockito.doReturn(1L).doReturn(1L).doReturn(1L).doReturn(2L).when(command).getLongUserInput(Mockito.any());
    Mockito.doReturn("delete").when(command).getStringUserInput(Mockito.any());
    Assert.assertEquals(List.of(new DurableDataLogRepairCommand.LogEditOperation(
                    DurableDataLogRepairCommand.LogEditType.DELETE_OPERATION, 1, 2, null)),
            command.getDurableLogEditsFromUser());
    // Case 2: Input an Add Edit Operation with a wrong operation type. Then retry with the correct operation type.
    Mockito.doReturn(true).doReturn(true).doReturn(false).when(command).confirmContinue();
    Mockito.doReturn(1L).doReturn(1L).when(command).getLongUserInput(Mockito.any());
    Mockito.doReturn("add").doReturn("wrong").doReturn("add").doReturn("DeleteSegmentOperation")
            .when(command).getStringUserInput(Mockito.any());
    DeleteSegmentOperation deleteOperationAdded = new DeleteSegmentOperation(1);
    List<DurableDataLogRepairCommand.LogEditOperation> editOps = new ArrayList<>();
    editOps.add(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 1, deleteOperationAdded));
    editOps.add(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 1, deleteOperationAdded));
    Assert.assertEquals(editOps, command.getDurableLogEditsFromUser());
    // Case 3: Create the rest of the operation types without payload (MergeSegmentOperation, StreamSegmentMapOperation,
    // StreamSegmentTruncateOperation, UpdateAttributesOperation).
    long timestamp = System.currentTimeMillis();
    UUID uuid = UUID.randomUUID();
    editOps.clear();
    Mockito.doReturn(true).doReturn(false).doReturn(false).doReturn(true).doReturn(true)
            .doReturn(false).doReturn(false).doReturn(true).doReturn(false).doReturn(true)
            .doReturn(true).doReturn(false).doReturn(false).when(command).confirmContinue();
    Mockito.doReturn(1L).doReturn(1L).doReturn(2L).doReturn(1L).doReturn(2L)
            .doReturn(123L).doReturn(2L).doReturn(2L).doReturn(3L).doReturn(1L)
            .doReturn(10L).doReturn(timestamp).doReturn(3L).doReturn(3L).doReturn(4L)
            .doReturn(4L).doReturn(3L).doReturn(1L).doReturn(2L).when(command).getLongUserInput(Mockito.any());
    Mockito.doReturn("add").doReturn("MergeSegmentOperation").doReturn(uuid.toString()).doReturn("add")
            .doReturn("StreamSegmentMapOperation").doReturn("test").doReturn(uuid.toString()).doReturn("add")
            .doReturn("StreamSegmentTruncateOperation").doReturn("add").doReturn("UpdateAttributesOperation").doReturn(uuid.toString())
            .when(command).getStringUserInput(Mockito.any());
    Mockito.doReturn((int) AttributeUpdateType.Replace.getTypeId()).when(command).getIntUserInput(Mockito.any());
    Mockito.doReturn(true).doReturn(true).doReturn(false).doReturn(false).when(command).getBooleanUserInput(Mockito.any());
    AttributeUpdateCollection attributeUpdates = new AttributeUpdateCollection();
    attributeUpdates.add(new AttributeUpdate(AttributeId.fromUUID(uuid), AttributeUpdateType.Replace, 1, 2));
    MergeSegmentOperation mergeSegmentOperation = new MergeSegmentOperation(1, 2, attributeUpdates);
    mergeSegmentOperation.setStreamSegmentOffset(123);
    editOps.add(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 1, 1, mergeSegmentOperation));
    Map<AttributeId, Long> attributes = new HashMap<>();
    attributes.put(AttributeId.fromUUID(uuid), 10L);
    SegmentProperties segmentProperties = StreamSegmentInformation.builder()
            .name("test").startOffset(2).length(3).storageLength(1)
            .sealed(true).deleted(false).sealedInStorage(true).deletedInStorage(false)
            .attributes(attributes).lastModified(new ImmutableDate(timestamp)).build();
    editOps.add(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 2, 2, new StreamSegmentMapOperation(segmentProperties)));
    editOps.add(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 3, 3, new StreamSegmentTruncateOperation(3, 3)));
    editOps.add(new DurableDataLogRepairCommand.LogEditOperation(DurableDataLogRepairCommand.LogEditType.ADD_OPERATION, 4, 4, new UpdateAttributesOperation(4, attributeUpdates)));
    Assert.assertEquals(editOps, command.getDurableLogEditsFromUser());
    // Case 4: Add wrong inputs.
    Mockito.doReturn(true).doReturn(true).doReturn(false).when(command).confirmContinue();
    Mockito.doThrow(NumberFormatException.class).doThrow(NullPointerException.class).when(command).getLongUserInput(Mockito.any());
    Mockito.doReturn("wrong").doReturn("replace").doReturn("replace").when(command).getStringUserInput(Mockito.any());
    command.getDurableLogEditsFromUser();
}
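The test drives DurableDataLogRepairCommand entirely through Mockito's consecutive stubbing: each doReturn(...) in a chain answers exactly one invocation, which is how the "wrong input, then retry" dialogs are simulated. A self-contained sketch of the pattern (the Prompter interface is hypothetical, not a Pravega type):

import static org.mockito.Mockito.anyString;
import static org.mockito.Mockito.doReturn;
import static org.mockito.Mockito.mock;

public class ConsecutiveStubSketch {
    // Hypothetical stand-in for the command's user-input methods.
    interface Prompter {
        String ask(String prompt);
    }

    public static void main(String[] args) {
        Prompter prompter = mock(Prompter.class);
        // First call yields an invalid answer, the second the corrected one,
        // mirroring the retry cases in the test above.
        doReturn("wrong").doReturn("delete").when(prompter).ask(anyString());
        System.out.println(prompter.ask("operation type?")); // wrong
        System.out.println(prompter.ask("operation type?")); // delete
    }
}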
use of io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation in project pravega by pravega.
In class StorageWriterTests, method createSegments:
private ArrayList<Long> createSegments(TestContext context) {
    ArrayList<Long> segmentIds = new ArrayList<>();
    for (int i = 0; i < SEGMENT_COUNT; i++) {
        String name = getSegmentName(i);
        context.metadata.mapStreamSegmentId(name, i);
        initializeSegment(i, context);
        segmentIds.add((long) i);
        // Add the operation to the log.
        StreamSegmentMapOperation mapOp = new StreamSegmentMapOperation(context.storage.getStreamSegmentInfo(name, TIMEOUT).join());
        mapOp.setStreamSegmentId(i);
        context.dataSource.add(mapOp);
    }
    return segmentIds;
}
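The essential step in the loop body is wrapping the segment's SegmentProperties in a StreamSegmentMapOperation and pinning the same id that was mapped into the metadata, so the writer's data source and the container metadata agree. A standalone sketch of just that construction (a sketch, not a definitive API reference; the contracts import paths are assumed):

import io.pravega.segmentstore.contracts.SegmentProperties;        // assumed path
import io.pravega.segmentstore.contracts.StreamSegmentInformation; // assumed path
import io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation;

public class MapOpSketch {
    public static void main(String[] args) {
        SegmentProperties props = StreamSegmentInformation.builder()
                .name("Segment_0") // illustrative name
                .length(0)
                .build();
        StreamSegmentMapOperation mapOp = new StreamSegmentMapOperation(props);
        mapOp.setStreamSegmentId(0L); // must match the id registered in the metadata
        System.out.println(mapOp);
    }
}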
use of io.pravega.segmentstore.server.logs.operations.StreamSegmentMapOperation in project pravega by pravega.
In class DurableLogTests, method testTruncateWithoutRecovery:
// endregion
// region Truncation
/**
 * Tests the truncate() method without doing any recovery.
 */
@Test
public void testTruncateWithoutRecovery() {
    int streamSegmentCount = 50;
    int appendsPerStreamSegment = 20;
    // Setup a DurableLog and start it.
    AtomicReference<TestDurableDataLog> dataLog = new AtomicReference<>();
    AtomicReference<Boolean> truncationOccurred = new AtomicReference<>();
    @Cleanup TestDurableDataLogFactory dataLogFactory = new TestDurableDataLogFactory(new InMemoryDurableDataLogFactory(MAX_DATA_LOG_APPEND_SIZE, executorService()), dataLog::set);
    @Cleanup Storage storage = InMemoryStorageFactory.newStorage(executorService());
    storage.initialize(1);
    UpdateableContainerMetadata metadata = new MetadataBuilder(CONTAINER_ID).build();
    @Cleanup CacheStorage cacheStorage = new DirectMemoryCache(Integer.MAX_VALUE);
    @Cleanup CacheManager cacheManager = new CacheManager(CachePolicy.INFINITE, cacheStorage, executorService());
    @Cleanup ReadIndex readIndex = new ContainerReadIndex(DEFAULT_READ_INDEX_CONFIG, metadata, storage, cacheManager, executorService());
    // First DurableLog. We use this for generating data.
    try (DurableLog durableLog = new DurableLog(ContainerSetup.defaultDurableLogConfig(), metadata, dataLogFactory, readIndex, executorService())) {
        durableLog.startAsync().awaitRunning();
        // Hook up a listener to figure out when truncation actually happens.
        dataLog.get().setTruncateCallback(seqNo -> truncationOccurred.set(true));
        // Generate some test data (we need to do this after we started the DurableLog because in the process of
        // recovery, it wipes away all existing metadata).
        Set<Long> streamSegmentIds = createStreamSegmentsWithOperations(streamSegmentCount, durableLog);
        List<Operation> queuedOperations = generateOperations(streamSegmentIds, new HashMap<>(), appendsPerStreamSegment, METADATA_CHECKPOINT_EVERY, false, false);
        // Process all operations.
        OperationWithCompletion.allOf(processOperations(queuedOperations, durableLog)).join();
        // Add a MetadataCheckpointOperation at the end, after everything else has processed. This ensures that it
        // sits in a DataFrame by itself and enables us to truncate everything at the end.
        processOperation(new MetadataCheckpointOperation(), durableLog).completion.join();
        awaitLastOperationAdded(durableLog, metadata);
        // Get a list of all the operations, before truncation.
        List<Operation> originalOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        boolean fullTruncationPossible = false;
        long currentTruncatedSeqNo = originalOperations.get(0).getSequenceNumber();
        // At the end, verify all operations and all entries in the DataLog were truncated.
        for (int i = 0; i < originalOperations.size(); i++) {
            Operation currentOperation = originalOperations.get(i);
            truncationOccurred.set(false);
            if (currentOperation instanceof MetadataCheckpointOperation) {
                // Perform the truncation.
                durableLog.truncate(currentOperation.getSequenceNumber(), TIMEOUT).join();
                awaitLastOperationAdded(durableLog, metadata);
                if (currentOperation.getSequenceNumber() != currentTruncatedSeqNo) {
                    // Unless the Truncation Point is the very first operation in the log, an actual
                    // truncation must have occurred.
                    Assert.assertTrue("No truncation occurred even though a valid Truncation Point was passed: " + currentOperation.getSequenceNumber(), truncationOccurred.get());
                    // Now verify that we get a StorageMetadataCheckpointOperation queued.
                    AssertExtensions.assertGreaterThan("Expected an operation to be queued as part of truncation.", 0, durableLog.getInMemoryOperationLog().size());
                    val readAfterTruncate = durableLog.read(1, TIMEOUT).join();
                    Assert.assertTrue("Expected a StorageMetadataCheckpointOperation to be queued as part of truncation.", readAfterTruncate.poll() instanceof StorageMetadataCheckpointOperation);
                }
                if (i == originalOperations.size() - 1) {
                    // Sometimes the Truncation Point is on the same DataFrame as other data, and it's the last DataFrame;
                    // in that case, it cannot be truncated, since truncating the frame would mean losing the Checkpoint as well.
                    fullTruncationPossible = durableLog.getInMemoryOperationLog().size() == 0;
                }
            } else {
                // Verify we are not allowed to truncate on non-valid Truncation Points.
                AssertExtensions.assertSuppliedFutureThrows("DurableLog allowed truncation on a non-MetadataCheckpointOperation.",
                        () -> durableLog.truncate(currentOperation.getSequenceNumber(), TIMEOUT),
                        ex -> ex instanceof IllegalArgumentException);
                Assert.assertFalse("Not expecting a truncation to have occurred.", truncationOccurred.get());
            }
        }
        // Verify that we can still queue operations to the DurableLog and they can be read.
        // In this case we'll just queue some StreamSegmentMapOperations.
        StreamSegmentMapOperation newOp = new StreamSegmentMapOperation(StreamSegmentInformation.builder().name("foo").build());
        if (!fullTruncationPossible) {
            // We were not able to do a full truncation before. Do one now, since we are guaranteed to have a new DataFrame available.
            MetadataCheckpointOperation lastCheckpoint = new MetadataCheckpointOperation();
            durableLog.add(lastCheckpoint, OperationPriority.Normal, TIMEOUT).join();
            awaitLastOperationAdded(durableLog, metadata);
            durableLog.truncate(lastCheckpoint.getSequenceNumber(), TIMEOUT).join();
        }
        durableLog.add(newOp, OperationPriority.Normal, TIMEOUT).join();
        awaitLastOperationAdded(durableLog, metadata);
        // Full Checkpoint + Storage Checkpoint (auto-added) + new op.
        final int expectedOperationCount = 3;
        List<Operation> newOperations = readUpToSequenceNumber(durableLog, metadata.getOperationSequenceNumber());
        Assert.assertEquals("Unexpected number of operations added after full truncation.", expectedOperationCount, newOperations.size());
        Assert.assertTrue("Expecting the first operation after full truncation to be a MetadataCheckpointOperation.", newOperations.get(0) instanceof MetadataCheckpointOperation);
        Assert.assertTrue("Expecting a StorageMetadataCheckpointOperation to be auto-added after full truncation.", newOperations.get(1) instanceof StorageMetadataCheckpointOperation);
        Assert.assertEquals("Unexpected Operation encountered after full truncation.", newOp, newOperations.get(2));
        // Stop the processor.
        durableLog.stopAsync().awaitTerminated();
    }
}
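The invariant this test exercises is that only a MetadataCheckpointOperation's sequence number is a valid truncation point, and that a successful truncation auto-queues a StorageMetadataCheckpointOperation. A condensed sketch of the happy-path call sequence, using only calls that appear above (import paths are assumed; durableLog must already be started):

import java.time.Duration;
import io.pravega.segmentstore.server.logs.DurableLog;                             // assumed path
import io.pravega.segmentstore.server.logs.operations.MetadataCheckpointOperation;
import io.pravega.segmentstore.server.logs.operations.OperationPriority;           // assumed path

public class TruncateSketch {
    static final Duration TIMEOUT = Duration.ofSeconds(30);

    // Append a fresh checkpoint, then truncate at its sequence number.
    // Truncating at any other sequence number fails with IllegalArgumentException,
    // and a StorageMetadataCheckpointOperation is auto-queued afterwards.
    static void truncateAtFreshCheckpoint(DurableLog durableLog) {
        MetadataCheckpointOperation checkpoint = new MetadataCheckpointOperation();
        durableLog.add(checkpoint, OperationPriority.Normal, TIMEOUT).join();
        durableLog.truncate(checkpoint.getSequenceNumber(), TIMEOUT).join();
    }
}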