Use of io.pravega.segmentstore.server.OperationLogFactory in the pravega project.
From the class DebugStreamSegmentContainerTests, method testDataRecoveryStorageLevel:
/**
 * Use a storage instance to create segments. Lists the segments from the storage and then recreates them using
 * debug segment containers. Before re-creating (or registering), the segments are mapped to their respective debug
 * segment container. Once registered, each segment's properties are matched to verify if the test was successful.
 */
@Test
public void testDataRecoveryStorageLevel() throws Exception {
    // Segments are mapped to four different containers.
    int containerCount = 4;
    int segmentsToCreateCount = 50;
    // Create a storage. Rolling policy of 1 byte forces a roll on every write, exercising chunked layouts.
    @Cleanup val baseStorage = new InMemoryStorage();
    @Cleanup val s = new RollingStorage(baseStorage, new SegmentRollingPolicy(1));
    s.initialize(1);
    log.info("Created a storage instance");
    // Record details (name, container Id & sealed status) of each segment to be created.
    Set<String> sealedSegments = new HashSet<>();
    byte[] data = "data".getBytes();
    SegmentToContainerMapper segToConMapper = new SegmentToContainerMapper(containerCount, true);
    Map<Integer, ArrayList<String>> segmentByContainers = new HashMap<>();
    // Create segments and get their container Ids, sealed status and names to verify.
    for (int i = 0; i < segmentsToCreateCount; i++) {
        String segmentName = "segment-" + RANDOM.nextInt();
        // Use segmentName to map to different containers.
        int containerId = segToConMapper.getContainerId(segmentName);
        // computeIfAbsent replaces the manual get/null-check/put sequence and avoids a second map lookup.
        segmentByContainers.computeIfAbsent(containerId, id -> new ArrayList<>()).add(segmentName);
        // Create segments, write data and randomly seal some of them.
        val wh1 = s.create(segmentName);
        // Write data.
        s.write(wh1, 0, new ByteArrayInputStream(data), data.length);
        if (RANDOM.nextBoolean()) {
            s.seal(wh1);
            sealedSegments.add(segmentName);
        }
    }
    log.info("Created some segments using the storage.");
    @Cleanup TestContext context = createContext(executorService());
    OperationLogFactory localDurableLogFactory = new DurableLogFactory(DEFAULT_DURABLE_LOG_CONFIG, context.dataLogFactory, executorService());
    Map<Integer, DebugStreamSegmentContainer> debugStreamSegmentContainerMap = new HashMap<>();
    log.info("Start a debug segment container corresponding to each container id.");
    for (int containerId = 0; containerId < containerCount; containerId++) {
        MetadataCleanupContainer localContainer = new MetadataCleanupContainer(containerId, CONTAINER_CONFIG, localDurableLogFactory, context.readIndexFactory, context.attributeIndexFactory, context.writerFactory, context.storageFactory, context.getDefaultExtensions(), executorService());
        Services.startAsync(localContainer, executorService()).join();
        debugStreamSegmentContainerMap.put(containerId, localContainer);
    }
    log.info("Recover all segments using the storage and debug segment containers.");
    recoverAllSegments(new AsyncStorageWrapper(s, executorService()), debugStreamSegmentContainerMap, executorService(), TIMEOUT);
    // Verify that each re-created segment reports the expected length and sealed status.
    for (int containerId = 0; containerId < containerCount; containerId++) {
        for (String segment : segmentByContainers.get(containerId)) {
            SegmentProperties props = debugStreamSegmentContainerMap.get(containerId).getStreamSegmentInfo(segment, TIMEOUT).join();
            Assert.assertEquals("Segment length mismatch.", data.length, props.getLength());
            Assert.assertEquals("Sealed status of the segment don't match.", sealedSegments.contains(segment), props.isSealed());
        }
        debugStreamSegmentContainerMap.get(containerId).close();
    }
}
Use of io.pravega.segmentstore.server.OperationLogFactory in the pravega project.
From the class StreamSegmentContainerTests, method testForcedMetadataCleanup:
/**
 * Tests the case when the ContainerMetadata has filled up to capacity (with segments and we cannot map anymore segments).
 */
@Test
public void testForcedMetadataCleanup() throws Exception {
    final int maxSegmentCount = 3;
    final int createdSegmentCount = maxSegmentCount * 2;
    // Cap the metadata at maxSegmentCount user segments (plus the pinned system segments).
    final ContainerConfig containerConfig = ContainerConfig.builder().with(ContainerConfig.SEGMENT_METADATA_EXPIRATION_SECONDS, (int) DEFAULT_CONFIG.getSegmentMetadataExpiration().getSeconds()).with(ContainerConfig.MAX_ACTIVE_SEGMENT_COUNT, maxSegmentCount + EXPECTED_PINNED_SEGMENT_COUNT).with(ContainerConfig.STORAGE_SNAPSHOT_TIMEOUT_SECONDS, (int) DEFAULT_CONFIG.getStorageSnapshotTimeout().getSeconds()).build();
    // We need a special DL config so that we can force truncations after every operation - this will speed up metadata
    // eviction eligibility.
    final DurableLogConfig durableLogConfig = DurableLogConfig.builder().with(DurableLogConfig.CHECKPOINT_MIN_COMMIT_COUNT, 1).with(DurableLogConfig.CHECKPOINT_COMMIT_COUNT, 10).with(DurableLogConfig.CHECKPOINT_TOTAL_COMMIT_LENGTH, 10L * 1024 * 1024).build();
    @Cleanup TestContext context = createContext(containerConfig);
    OperationLogFactory durableLogFactory = new DurableLogFactory(durableLogConfig, context.dataLogFactory, executorService());
    @Cleanup MetadataCleanupContainer cleanupContainer = new MetadataCleanupContainer(CONTAINER_ID, containerConfig, durableLogFactory, context.readIndexFactory, context.attributeIndexFactory, context.writerFactory, context.storageFactory, context.getDefaultExtensions(), executorService());
    cleanupContainer.startAsync().awaitRunning();
    // Create twice as many segments as the metadata can hold active at once.
    val segments = new ArrayList<String>();
    for (int idx = 0; idx < createdSegmentCount; idx++) {
        final String segmentName = getSegmentName(idx);
        segments.add(segmentName);
        cleanupContainer.createStreamSegment(segmentName, getSegmentType(segmentName), null, TIMEOUT).join();
    }
    // Activate three segments (this should fill up available capacity).
    for (int idx : new int[] { 2, 4, 5 }) {
        activateSegment(segments.get(idx), cleanupContainer).join();
    }
    // At this point, the active segments should be: 2, 4 and 5.
    // A fourth activation must be rejected because the metadata is full.
    AssertExtensions.assertSuppliedFutureThrows("getSegmentId() allowed mapping more segments than the metadata can support.", () -> activateSegment(segments.get(1), cleanupContainer), ex -> ex instanceof TooManyActiveSegmentsException);
    // Test the ability to forcefully evict items from the metadata when there is pressure and we need to register something new.
    // Case 1: following a Segment deletion.
    cleanupContainer.deleteStreamSegment(segments.get(2), TIMEOUT).join();
    val segment1Activation = tryActivate(cleanupContainer, segments.get(1), segments.get(4));
    val segment1Info = segment1Activation.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertNotNull("Unable to properly activate dormant segment (1).", segment1Info);
    // Case 2: following a Merge.
    cleanupContainer.sealStreamSegment(segments.get(5), TIMEOUT).join();
    cleanupContainer.mergeStreamSegment(segments.get(4), segments.get(5), TIMEOUT).join();
    val segment0Activation = tryActivate(cleanupContainer, segments.get(0), segments.get(3));
    val segment0Info = segment0Activation.get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertNotNull("Unable to properly activate dormant segment (0).", segment0Info);
    tryActivate(cleanupContainer, segments.get(1), segments.get(3)).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // At this point the active segments should be: 0, 1 and 3.
    Assert.assertNotNull("Pre-activated segment did not stay in metadata (3).", cleanupContainer.getStreamSegmentInfo(segments.get(3), TIMEOUT).join());
    Assert.assertNotNull("Pre-activated segment did not stay in metadata (1).", cleanupContainer.getStreamSegmentInfo(segments.get(1), TIMEOUT).join());
    Assert.assertNotNull("Pre-activated segment did not stay in metadata (0).", cleanupContainer.getStreamSegmentInfo(segments.get(0), TIMEOUT).join());
    // NOTE(review): this stops context.container, which was never started here; presumably intended to stop the
    // TestContext's default container rather than the local one (which @Cleanup closes) — confirm against upstream.
    context.container.stopAsync().awaitTerminated();
}
Use of io.pravega.segmentstore.server.OperationLogFactory in the pravega project.
From the class RestoreBackUpDataRecoveryTest, method startDebugSegmentContainers:
/**
 * Creates one debug segment container per container id, starts each one, and returns them keyed by id.
 *
 * @param context        The test context supplying the component factories.
 * @param containerCount Number of debug segment containers to create (ids 0..containerCount-1).
 * @param dataLogFactory Factory for the durable data log backing each container.
 * @param storageFactory Factory for the storage used by each container.
 * @return A map from container id to its started {@link DebugStreamSegmentContainer}.
 */
private Map<Integer, DebugStreamSegmentContainer> startDebugSegmentContainers(DebugStreamSegmentContainerTests.TestContext context, int containerCount, InMemoryDurableDataLogFactory dataLogFactory, StorageFactory storageFactory) throws Exception {
    Map<Integer, DebugStreamSegmentContainer> containers = new HashMap<>();
    // All containers share one DurableLog factory configured to never truncate.
    OperationLogFactory durableLogFactory = new DurableLogFactory(NO_TRUNCATIONS_DURABLE_LOG_CONFIG, dataLogFactory, executorService());
    for (int id = 0; id < containerCount; id++) {
        DebugStreamSegmentContainerTests.MetadataCleanupContainer container = new DebugStreamSegmentContainerTests.MetadataCleanupContainer(id, CONTAINER_CONFIG, durableLogFactory, context.readIndexFactory, context.attributeIndexFactory, context.writerFactory, storageFactory, context.getDefaultExtensions(), executorService());
        // Block until the container is fully started (or TIMEOUT elapses) before registering it.
        Services.startAsync(container, executorService()).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
        containers.put(id, container);
    }
    return containers;
}
Aggregations