Use of io.pravega.segmentstore.contracts.SegmentType in project pravega by pravega.
In class StreamSegmentContainerTests, method testBasicConditionalMergeScenarios:
/**
* Test in detail the basic situations that a conditional segment merge can face.
*/
@Test
public void testBasicConditionalMergeScenarios() throws Exception {
    @Cleanup TestContext context = createContext();
    context.container.startAsync().awaitRunning();
    final String parentSegment = "parentSegment";

    // This will be the attribute update to execute against the parent segment.
    Function<String, AttributeUpdateCollection> attributeUpdateForTxn = txnName -> AttributeUpdateCollection.from(
            new AttributeUpdate(AttributeId.fromUUID(UUID.nameUUIDFromBytes(txnName.getBytes())),
                    AttributeUpdateType.ReplaceIfEquals, txnName.hashCode() + 1, txnName.hashCode()));

    Function<String, Long> getAttributeValue = txnName -> {
        AttributeId attributeId = AttributeId.fromUUID(UUID.nameUUIDFromBytes(txnName.getBytes()));
        return context.container.getAttributes(parentSegment, Collections.singletonList(attributeId), true, TIMEOUT)
                .join().get(attributeId);
    };

    // Create a parent Segment.
    context.container.createStreamSegment(parentSegment, getSegmentType(parentSegment), null, TIMEOUT)
            .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    SegmentType segmentType = getSegmentType(parentSegment);

    // Case 1: Create an empty transaction that fails to merge conditionally due to bad attributes.
    String txnName = NameUtils.getTransactionNameFromId(parentSegment, UUID.randomUUID());
    AttributeId txnAttributeId = AttributeId.fromUUID(UUID.nameUUIDFromBytes(txnName.getBytes()));
    context.container.createStreamSegment(txnName, segmentType, null, TIMEOUT)
            .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    AttributeUpdateCollection attributeUpdates = attributeUpdateForTxn.apply(txnName);
    AssertExtensions.assertFutureThrows("Transaction was expected to fail on attribute update",
            context.container.mergeStreamSegment(parentSegment, txnName, attributeUpdates, TIMEOUT),
            ex -> ex instanceof BadAttributeUpdateException);
    Assert.assertEquals(Attributes.NULL_ATTRIBUTE_VALUE, (long) getAttributeValue.apply(txnName));

    // Case 2: Now, we prepare the attributes in the parent segment so the merge of the empty transaction succeeds.
    context.container.updateAttributes(parentSegment,
            AttributeUpdateCollection.from(new AttributeUpdate(txnAttributeId, AttributeUpdateType.Replace, txnName.hashCode())),
            TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // As the source segment is empty, the amount of merged data should be 0.
    Assert.assertEquals(0L, context.container.mergeStreamSegment(parentSegment, txnName, attributeUpdates, TIMEOUT)
            .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS).getMergedDataLength());
    // But the attribute related to that transaction merge on the parent segment should have been updated.
    Assert.assertEquals(txnName.hashCode() + 1L,
            (long) context.container.getAttributes(parentSegment, Collections.singletonList(txnAttributeId), true, TIMEOUT)
                    .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS).get(txnAttributeId));

    // Case 3: Create a non-empty transaction that should fail due to a conditional attribute update failure.
    txnName = NameUtils.getTransactionNameFromId(parentSegment, UUID.randomUUID());
    txnAttributeId = AttributeId.fromUUID(UUID.nameUUIDFromBytes(txnName.getBytes()));
    attributeUpdates = attributeUpdateForTxn.apply(txnName);
    context.container.createStreamSegment(txnName, segmentType, null, TIMEOUT)
            .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // Add some appends to the transaction.
    RefCountByteArraySegment appendData = getAppendData(txnName, 1);
    context.container.append(txnName, appendData, null, TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // Attempt the conditional merge.
    AssertExtensions.assertFutureThrows("Transaction was expected to fail on attribute update",
            context.container.mergeStreamSegment(parentSegment, txnName, attributeUpdates, TIMEOUT),
            ex -> ex instanceof BadAttributeUpdateException);
    Assert.assertEquals(Attributes.NULL_ATTRIBUTE_VALUE, (long) getAttributeValue.apply(txnName));

    // Case 4: Now, we prepare the attributes in the parent segment so the merge of the non-empty transaction succeeds.
    context.container.updateAttributes(parentSegment,
            AttributeUpdateCollection.from(new AttributeUpdate(txnAttributeId, AttributeUpdateType.Replace, txnName.hashCode())),
            TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    // As the source segment is non-empty, the amount of merged data should be greater than 0.
    Assert.assertTrue(context.container.mergeStreamSegment(parentSegment, txnName, attributeUpdates, TIMEOUT)
            .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS).getMergedDataLength() > 0);
    // The attribute related to that transaction merge on the parent segment should have been updated as well.
    Assert.assertEquals(txnName.hashCode() + 1L,
            (long) context.container.getAttributes(parentSegment, Collections.singletonList(txnAttributeId), true, TIMEOUT)
                    .get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS).get(txnAttributeId));

    context.container.stopAsync().awaitTerminated();
}
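The pattern this test exercises is that mergeStreamSegment only commits when the supplied AttributeUpdateCollection applies cleanly against the parent segment's current attribute values. Below is a minimal sketch of that pattern using only the calls already shown above; container, TIMEOUT, and the segment names parent/txn are assumed to come from a surrounding test context like the one in this test.

// Sketch (illustrative): conditional merge keyed on a per-transaction attribute.
AttributeId txnKey = AttributeId.fromUUID(UUID.nameUUIDFromBytes(txn.getBytes()));
long expected = txn.hashCode();
// Seed the expected value on the parent segment...
container.updateAttributes(parent,
        AttributeUpdateCollection.from(new AttributeUpdate(txnKey, AttributeUpdateType.Replace, expected)),
        TIMEOUT).join();
// ...then merge conditionally: ReplaceIfEquals succeeds only if the parent still holds `expected`.
container.mergeStreamSegment(parent, txn,
        AttributeUpdateCollection.from(new AttributeUpdate(txnKey, AttributeUpdateType.ReplaceIfEquals, expected + 1, expected)),
        TIMEOUT).join();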
Use of io.pravega.segmentstore.contracts.SegmentType in project pravega by pravega.
In class TableServiceTests, method testEndToEnd:
/**
* Tests an End-to-End scenario for a {@link TableStore} implementation using a real implementation of {@link StreamSegmentStore}
* (without any mocks or manual event triggering or other test aids). Features tested:
* - Table Segment creation and deletion.
* - Conditional and unconditional updates.
* - Conditional and unconditional removals.
* - Recovery of Table Segments after failover.
*
* This tests both Hash Table Segments and Fixed-Key-Length Table Segments.
*/
@Test
public void testEndToEnd() throws Exception {
    val rnd = new Random(0);
    val segmentTypes = new SegmentType[] {
            SegmentType.builder().tableSegment().build(),
            SegmentType.builder().fixedKeyLengthTableSegment().build() };
    ArrayList<String> segmentNames;
    HashMap<BufferView, EntryData> keyInfo;

    // Phase 1: Create some segments and update some data (unconditionally).
    log.info("Starting Phase 1");
    try (val builder = createBuilder()) {
        val tableStore = builder.createTableStoreService();

        // Create the Table Segments.
        segmentNames = createSegments(tableStore, segmentTypes);
        log.info("Created Segments: {}.", String.join(", ", segmentNames));

        // Generate the keys and map them to segments.
        keyInfo = generateKeysForSegments(segmentNames, rnd);

        // Unconditional updates.
        val updates = generateUpdates(keyInfo, false, rnd);
        val updateVersions = executeUpdates(updates, tableStore);
        acceptUpdates(updates, updateVersions, keyInfo);
        log.info("Finished unconditional updates.");

        // Check.
        check(keyInfo, tableStore);
        log.info("Finished Phase 1");
    }

    // Phase 2: Force a recovery and remove all data (unconditionally).
    log.info("Starting Phase 2");
    try (val builder = createBuilder()) {
        val tableStore = builder.createTableStoreService();

        // Check (after recovery).
        check(keyInfo, tableStore);

        // Unconditional removals.
        val removals = generateRemovals(keyInfo, false);
        executeRemovals(removals, tableStore);
        acceptRemovals(removals, keyInfo);

        // Check.
        check(keyInfo, tableStore);
        log.info("Finished Phase 2");
    }

    // Phase 3: Force a recovery and conditionally update and remove data.
    log.info("Starting Phase 3");
    try (val builder = createBuilder()) {
        val tableStore = builder.createTableStoreService();

        // Check (after recovery).
        check(keyInfo, tableStore);

        // Conditional update.
        val updates = generateUpdates(keyInfo, true, rnd);
        val updateVersions = executeUpdates(updates, tableStore);
        acceptUpdates(updates, updateVersions, keyInfo);
        val offsetConditionedUpdates = generateUpdates(keyInfo, true, rnd);
        val offsetUpdateVersions = executeOffsetConditionalUpdates(offsetConditionedUpdates, -1L, tableStore);
        acceptUpdates(offsetConditionedUpdates, offsetUpdateVersions, keyInfo);
        log.info("Finished conditional updates.");

        // Check.
        check(keyInfo, tableStore);

        // Conditional remove.
        val removals = generateRemovals(keyInfo, true);
        executeRemovals(removals, tableStore);
        acceptRemovals(removals, keyInfo);
        val offsetConditionedRemovals = generateRemovals(keyInfo, true);
        executeOffsetConditonalRemovals(offsetConditionedRemovals, -1L, tableStore);
        acceptRemovals(offsetConditionedRemovals, keyInfo);
        log.info("Finished conditional removes.");

        // Check.
        check(keyInfo, tableStore);
        log.info("Finished Phase 3");
    }

    // Phase 4: Force a recovery and conditionally remove all data.
    log.info("Starting Phase 4");
    try (val builder = createBuilder()) {
        val tableStore = builder.createTableStoreService();

        // Check (after recovery).
        check(keyInfo, tableStore);

        // Conditional update again.
        val updates = generateUpdates(keyInfo, true, rnd);
        val updateVersions = executeUpdates(updates, tableStore);
        acceptUpdates(updates, updateVersions, keyInfo);
        log.info("Finished conditional updates.");

        // Check.
        check(keyInfo, tableStore);

        // Delete all.
        val deletions = segmentNames.stream().map(s -> tableStore.deleteSegment(s, false, TIMEOUT)).collect(Collectors.toList());
        Futures.allOf(deletions).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
        log.info("Finished Phase 4");
    }
}
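Each phase above follows the same cycle: build the service, exercise it, and close the builder; the next createBuilder() then has to recover the Table Segments before check() can pass. A condensed sketch of that cycle, assuming the same test helpers (createBuilder, createTableStoreService, check, keyInfo) as above:

// Sketch (illustrative): closing one builder and creating another forces a recovery between phases.
try (val builder = createBuilder()) {
    val tableStore = builder.createTableStoreService();
    check(keyInfo, tableStore);  // data written before the previous builder was closed must still be readable
}
try (val builder = createBuilder()) {
    val tableStore = builder.createTableStoreService();
    check(keyInfo, tableStore);  // and it must survive the next recovery as well
}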
Use of io.pravega.segmentstore.contracts.SegmentType in project pravega by pravega.
In class ContainerKeyIndexTests, method testRegularSegmentThrottling:
/**
* Tests that regular Segments get the right amount of credits.
*/
@Test
public void testRegularSegmentThrottling() {
    @Cleanup val context = new TestContext();
    @Cleanup ContainerKeyIndex.SegmentTracker segmentTracker = context.index.new SegmentTracker();
    DirectSegmentAccess mockSegment = Mockito.mock(DirectSegmentAccess.class);
    SegmentMetadata mockSegmentMetadata = Mockito.mock(SegmentMetadata.class);

    // Regular segment.
    SegmentType segmentType = SegmentType.builder().build();
    Mockito.when(mockSegmentMetadata.getType()).thenReturn(segmentType);
    Mockito.when(mockSegment.getInfo()).thenReturn(mockSegmentMetadata);
    Mockito.when(mockSegment.getSegmentId()).thenReturn(1L);

    int updateSize = TableExtensionConfig.MAX_UNINDEXED_LENGTH.getDefaultValue() - 1;
    segmentTracker.throttleIfNeeded(mockSegment, () -> CompletableFuture.completedFuture(null), updateSize).join();
    Assert.assertEquals(segmentTracker.getUnindexedSizeBytes(1L), TableExtensionConfig.MAX_UNINDEXED_LENGTH.getDefaultValue() - 1);
}
Use of io.pravega.segmentstore.contracts.SegmentType in project pravega by pravega.
In class ContainerKeyIndexTests, method testCriticalSegmentThrottling:
/**
* Tests that system-critical Segments get the right amount of credits.
*/
@Test
public void testCriticalSegmentThrottling() {
    @Cleanup val context = new TestContext();
    @Cleanup ContainerKeyIndex.SegmentTracker segmentTracker = context.index.new SegmentTracker();
    DirectSegmentAccess mockSegment = Mockito.mock(DirectSegmentAccess.class);
    SegmentMetadata mockSegmentMetadata = Mockito.mock(SegmentMetadata.class);

    // System critical segment.
    SegmentType segmentType = SegmentType.builder().critical().system().build();
    Mockito.when(mockSegmentMetadata.getType()).thenReturn(segmentType);
    Mockito.when(mockSegment.getInfo()).thenReturn(mockSegmentMetadata);
    Mockito.when(mockSegment.getSegmentId()).thenReturn(1L);

    // Update size is 1 byte smaller than the limit, so it should not block.
    int updateSize = TableExtensionConfig.SYSTEM_CRITICAL_MAX_UNINDEXED_LENGTH.getDefaultValue() - 1;
    segmentTracker.throttleIfNeeded(mockSegment, () -> CompletableFuture.completedFuture(null), updateSize).join();
    Assert.assertEquals(segmentTracker.getUnindexedSizeBytes(1L), TableExtensionConfig.SYSTEM_CRITICAL_MAX_UNINDEXED_LENGTH.getDefaultValue() - 1);

    // Now, we do another update and check that the Segment has no credit.
    AssertExtensions.assertThrows(TimeoutException.class,
            () -> segmentTracker.throttleIfNeeded(mockSegment, () -> CompletableFuture.completedFuture(null), updateSize)
                    .get(10, TimeUnit.MILLISECONDS));
}
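Taken together, these two tests show that the credit budget the SegmentTracker grants is driven by the segment's SegmentType: plain segments are bounded by MAX_UNINDEXED_LENGTH, while system-critical segments get the separate SYSTEM_CRITICAL_MAX_UNINDEXED_LENGTH budget. A small sketch of the two pairings, using only the builder calls and config properties that appear above:

// Sketch (illustrative): which config limit applies to which SegmentType in the tests above.
SegmentType regular = SegmentType.builder().build();
int regularCredits = TableExtensionConfig.MAX_UNINDEXED_LENGTH.getDefaultValue();

SegmentType systemCritical = SegmentType.builder().critical().system().build();
int criticalCredits = TableExtensionConfig.SYSTEM_CRITICAL_MAX_UNINDEXED_LENGTH.getDefaultValue();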
Use of io.pravega.segmentstore.contracts.SegmentType in project pravega by pravega.
In class MetadataStoreTestBase, method testRegisterPinnedSegment:
/**
* Checks that we can create and register a pinned Segment via {@link MetadataStore}.
*/
@Test
public void testRegisterPinnedSegment() {
    final String segmentName = "PinnedSegment";
    @Cleanup TestContext context = createTestContext();
    context.getMetadataStore().createSegment(segmentName, SEGMENT_TYPE, null, TIMEOUT).join();

    // Let's register a pinned Segment.
    SegmentType segmentType = SegmentType.builder().system().internal().critical().build();
    long segmentId = context.getMetadataStore().registerPinnedSegment(segmentName, segmentType, null, TIMEOUT).join();
    Assert.assertTrue(context.connector.getContainerMetadata().getStreamSegmentMetadata(segmentId).isPinned());
}
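As a quick reference, these are the SegmentType variants constructed across the snippets on this page (builder calls only, exactly as they appear above):

// Summary sketch of the SegmentType flavors used in the examples above.
SegmentType plain          = SegmentType.builder().build();
SegmentType hashTable      = SegmentType.builder().tableSegment().build();
SegmentType fixedKeyTable  = SegmentType.builder().fixedKeyLengthTableSegment().build();
SegmentType systemCritical = SegmentType.builder().critical().system().build();
SegmentType pinnedSystem   = SegmentType.builder().system().internal().critical().build();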