use of io.pravega.segmentstore.server.SegmentMetadata in project pravega by pravega.
the class StreamSegmentContainerMetadataTests method testMapStreamSegment.
/**
 * Tests the ability to map new StreamSegments.
 */
@Test
public void testMapStreamSegment() {
    final UpdateableContainerMetadata m = new MetadataBuilder(CONTAINER_ID).build();
    final HashMap<Long, Long> segmentIds = new HashMap<>();
    for (long i = 0; i < SEGMENT_COUNT; i++) {
        final long segmentId = segmentIds.size();
        String segmentName = getName(segmentId);

        // This should work.
        // Change the sequence number, before mapping.
        m.nextOperationSequenceNumber();
        m.mapStreamSegmentId(segmentName, segmentId);
        segmentIds.put(segmentId, m.getOperationSequenceNumber());
        Assert.assertEquals("Unexpected value from getStreamSegmentId (Stand-alone Segment).",
                segmentId, m.getStreamSegmentId(segmentName, false));

        // Now check that we cannot re-map the same SegmentId or SegmentName.
        AssertExtensions.assertThrows("mapStreamSegmentId allowed mapping the same SegmentId twice.",
                () -> m.mapStreamSegmentId(segmentName + "foo", segmentId),
                ex -> ex instanceof IllegalArgumentException);
        AssertExtensions.assertThrows("mapStreamSegmentId allowed mapping the same SegmentName twice.",
                () -> m.mapStreamSegmentId(segmentName, segmentId + 1),
                ex -> ex instanceof IllegalArgumentException);
    }

    // Check getLastUsed.
    for (Map.Entry<Long, Long> e : segmentIds.entrySet()) {
        // Increment the SeqNo so we can verify 'updateLastUsed'.
        m.nextOperationSequenceNumber();
        SegmentMetadata segmentMetadata = m.getStreamSegmentMetadata(e.getKey());
        Assert.assertEquals("Unexpected value for getLastUsed for untouched segment.",
                (long) e.getValue(), segmentMetadata.getLastUsed());
        m.getStreamSegmentId(segmentMetadata.getName(), false);
        Assert.assertEquals("Unexpected value for getLastUsed for untouched segment.",
                (long) e.getValue(), segmentMetadata.getLastUsed());
        m.getStreamSegmentId(segmentMetadata.getName(), true);
        Assert.assertEquals("Unexpected value for getLastUsed for touched segment.",
                m.getOperationSequenceNumber(), segmentMetadata.getLastUsed());
    }

    Collection<Long> metadataSegmentIds = m.getAllStreamSegmentIds();
    AssertExtensions.assertContainsSameElements("Metadata does not contain the expected Segment Ids",
            segmentIds.keySet(), metadataSegmentIds);
}
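As a quick illustration of the lastUsed bookkeeping that the second loop verifies, here is a minimal stand-alone sketch (the container id, segment name and segment id are arbitrary assumptions; only calls already exercised above are used): a lookup with updateLastUsed == false leaves getLastUsed() at the sequence number recorded when the segment was mapped, while a lookup with true advances it to the current operation sequence number.

UpdateableContainerMetadata metadata = new MetadataBuilder(0).build(); // container id 0 is arbitrary
metadata.nextOperationSequenceNumber();                                // advance the SeqNo before mapping
metadata.mapStreamSegmentId("segment-0", 123L);                        // register name -> id
long mappedAt = metadata.getOperationSequenceNumber();                 // lastUsed is initialized to this SeqNo
metadata.nextOperationSequenceNumber();                                // simulate later activity
metadata.getStreamSegmentId("segment-0", false);                       // lookup only: lastUsed stays at mappedAt
metadata.getStreamSegmentId("segment-0", true);                        // lookup + touch: lastUsed becomes the current SeqNo
long lastUsed = metadata.getStreamSegmentMetadata(123L).getLastUsed(); // now equals metadata.getOperationSequenceNumber()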
use of io.pravega.segmentstore.server.SegmentMetadata in project pravega by pravega.
the class ContainerEventProcessorTests method testAppendWithFailingSegment.
/**
 * Checks the behavior of the EventProcessor when there are failures adding events to the internal Segment.
 *
 * @throws Exception
 */
@Test(timeout = 10000)
public void testAppendWithFailingSegment() throws Exception {
    DirectSegmentAccess faultySegment = mock(SegmentMock.class);
    when(faultySegment.append(any(), any(), any())).thenThrow(NullPointerException.class);
    SegmentMetadata mockMetadata = mock(SegmentMetadata.class);
    when(mockMetadata.getLength()).thenReturn(0L);
    when(faultySegment.getInfo()).thenReturn(mockMetadata);
    Function<String, CompletableFuture<DirectSegmentAccess>> faultySegmentSupplier = s -> CompletableFuture.completedFuture(faultySegment);
    @Cleanup
    ContainerEventProcessor eventProcessorService = new ContainerEventProcessorImpl(0, faultySegmentSupplier,
            ITERATION_DELAY, CONTAINER_OPERATION_TIMEOUT, this.executorService());
    int maxItemsProcessed = 10;
    int maxOutstandingBytes = 4 * 1024 * 1024;
    int truncationDataSize = 500;
    ContainerEventProcessor.EventProcessorConfig config =
            new ContainerEventProcessor.EventProcessorConfig(maxItemsProcessed, maxOutstandingBytes, truncationDataSize);
    Function<List<BufferView>, CompletableFuture<Void>> doNothing = l -> null;
    @Cleanup
    ContainerEventProcessor.EventProcessor processor = eventProcessorService.forConsumer("testSegmentMax", doNothing, config)
            .get(TIMEOUT_FUTURE.toSeconds(), TimeUnit.SECONDS);

    // Verify that the client gets the exception if there is some issue on add().
    BufferView event = new ByteArraySegment("Test".getBytes());
    AssertExtensions.assertThrows(NullPointerException.class, () -> processor.add(event, TIMEOUT_FUTURE).join());
}
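A detail worth calling out is how the stubbed NullPointerException reaches the test: whether the mock throws it synchronously or it completes the returned future exceptionally, a join()-based assertion observes it (in the asynchronous case wrapped in a CompletionException, which the assertion helper is then expected to match against the expected type). A minimal stand-alone sketch of the asynchronous case, using only the standard library and no Pravega or Mockito types:

// Hypothetical illustration, not Pravega code: a stage that fails with an NPE, observed via join().
CompletableFuture<Void> failingAdd = CompletableFuture.failedFuture(new NullPointerException("simulated append failure"));
try {
    failingAdd.join();
} catch (CompletionException e) {
    // join() rethrows the failure wrapped in a CompletionException; callers (or assertion helpers) typically unwrap it.
    assert e.getCause() instanceof NullPointerException;
}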
use of io.pravega.segmentstore.server.SegmentMetadata in project pravega by pravega.
the class StreamSegmentContainer method getAndCacheAttributes.
/**
 * Gets the values of the given (Core and Extended) Attribute Ids for the given segment.
 *
 * @param segmentMetadata The SegmentMetadata for the Segment to retrieve attribute values for.
 * @param attributeIds    A Collection of AttributeIds to retrieve.
 * @param cache           If true, any Extended Attribute value that is not present in the SegmentMetadata cache will
 *                        be added to that cache (using a conditional updateAttributes() call) before completing.
 * @param timer           Timer for the operation.
 * @return A CompletableFuture that, when completed normally, will contain the desired result. If the operation failed,
 * it will be completed with the appropriate exception. If cache==true and the conditional call to updateAttributes()
 * could not be completed because of a conflicting update, it will be failed with BadAttributeUpdateException, in which
 * case a retry is warranted.
 */
private CompletableFuture<Map<AttributeId, Long>> getAndCacheAttributes(SegmentMetadata segmentMetadata, Collection<AttributeId> attributeIds, boolean cache, TimeoutTimer timer) {
    // Collect Core Attributes and Cached Extended Attributes.
    Map<AttributeId, Long> result = new HashMap<>();
    Map<AttributeId, Long> metadataAttributes = segmentMetadata.getAttributes();
    ArrayList<AttributeId> extendedAttributeIds = new ArrayList<>();
    attributeIds.forEach(attributeId -> {
        Long v = metadataAttributes.get(attributeId);
        if (v != null) {
            // This attribute is cached in the Segment Metadata, even if it has a value equal to Attributes.NULL_ATTRIBUTE_VALUE.
            result.put(attributeId, v);
        } else if (!Attributes.isCoreAttribute(attributeId)) {
            extendedAttributeIds.add(attributeId);
        }
    });

    if (extendedAttributeIds.isEmpty()) {
        // Nothing to lookup in the Attribute Index, so bail out early.
        return CompletableFuture.completedFuture(result);
    }

    // Collect remaining Extended Attributes.
    CompletableFuture<Map<AttributeId, Long>> r = this.attributeIndex
            .forSegment(segmentMetadata.getId(), timer.getRemaining())
            .thenComposeAsync(idx -> idx.get(extendedAttributeIds, timer.getRemaining()), this.executor)
            .thenApplyAsync(extendedAttributes -> {
                if (extendedAttributeIds.size() == extendedAttributes.size()) {
                    // We found a value for each Attribute Id. Nothing more to do.
                    return extendedAttributes;
                }

                // Insert a NULL_ATTRIBUTE_VALUE for each missing value.
                Map<AttributeId, Long> allValues = new HashMap<>(extendedAttributes);
                extendedAttributeIds.stream()
                        .filter(id -> !extendedAttributes.containsKey(id))
                        .forEach(id -> allValues.put(id, Attributes.NULL_ATTRIBUTE_VALUE));
                return allValues;
            }, this.executor);

    if (cache && !segmentMetadata.isSealed()) {
        // Add them to the cache if requested.
        r = r.thenComposeAsync(extendedAttributes -> {
            // Update the in-memory Segment Metadata using a special update (AttributeUpdateType.None, which should
            // complete if the attribute is not currently set). If it has some value, then a concurrent update
            // must have changed it and we cannot update anymore.
            val updates = new AttributeUpdateCollection();
            for (val e : extendedAttributes.entrySet()) {
                updates.add(new AttributeUpdate(e.getKey(), AttributeUpdateType.None, e.getValue()));
            }

            // Add the operation directly (rather than going through updateAttributes()), as that path could end up
            // invoking this method again.
            return addOperation(new UpdateAttributesOperation(segmentMetadata.getId(), updates), timer.getRemaining())
                    .thenApply(v -> extendedAttributes);
        }, this.executor);
    }

    // Compile the final result.
    return r.thenApply(extendedAttributes -> {
        result.putAll(extendedAttributes);
        return result;
    });
}
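The Javadoc above notes that a BadAttributeUpdateException from the conditional updateAttributes() call warrants a retry; the next snippet (processAttributeUpdaterOperation) shows how the container itself drives that retry. As a condensed, caller-side sketch of the same idea (the helper name getAttributesWithOneRetry is hypothetical; Futures, Exceptions and BadAttributeUpdateException are the same types used elsewhere in these snippets):

// Hypothetical helper, not part of StreamSegmentContainer: retry getAndCacheAttributes() exactly once
// if the conditional cache update raced with a concurrent attribute update.
private CompletableFuture<Map<AttributeId, Long>> getAttributesWithOneRetry(
        SegmentMetadata segmentMetadata, Collection<AttributeId> attributeIds, TimeoutTimer timer) {
    return Futures.exceptionallyCompose(
            getAndCacheAttributes(segmentMetadata, attributeIds, true, timer),
            ex -> Exceptions.unwrap(ex) instanceof BadAttributeUpdateException
                    ? getAndCacheAttributes(segmentMetadata, attributeIds, true, timer)   // single retry
                    : Futures.<Map<AttributeId, Long>>failedFuture(Exceptions.unwrap(ex)));
}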
use of io.pravega.segmentstore.server.SegmentMetadata in project pravega by pravega.
the class StreamSegmentContainer method processAttributeUpdaterOperation.
/**
 * Processes the given AttributeUpdaterOperation with exactly one retry in case it was rejected because of an attribute
 * update failure due to the attribute value missing from the in-memory cache.
 *
 * @param operation The Operation to process.
 * @param timer     Timer for the operation.
 * @param <T>       Type of the operation.
 * @return A CompletableFuture that, when completed normally, will indicate that the Operation has been successfully
 * processed. If it failed, it will be completed with an appropriate exception.
 */
private <T extends Operation & AttributeUpdaterOperation> CompletableFuture<Void> processAttributeUpdaterOperation(T operation, TimeoutTimer timer) {
    Collection<AttributeUpdate> updates = operation.getAttributeUpdates();
    if (updates == null || updates.isEmpty()) {
        // No need for extra complicated handling.
        return addOperation(operation, timer.getRemaining());
    }

    return Futures.exceptionallyCompose(addOperation(operation, timer.getRemaining()), ex -> {
        // We only retry a BadAttributeUpdateException if it has the PreviousValueMissing flag set.
        ex = Exceptions.unwrap(ex);
        if (ex instanceof BadAttributeUpdateException && ((BadAttributeUpdateException) ex).isPreviousValueMissing()) {
            // Get the missing attributes and load them into the cache, then retry the operation, exactly once.
            SegmentMetadata segmentMetadata = this.metadata.getStreamSegmentMetadata(operation.getStreamSegmentId());
            Collection<AttributeId> attributeIds = updates.stream()
                    .map(AttributeUpdate::getAttributeId)
                    .filter(id -> !Attributes.isCoreAttribute(id))
                    .collect(Collectors.toList());
            if (!attributeIds.isEmpty()) {
                // This only makes sense if a non-core attribute was missing.
                return getAndCacheAttributes(segmentMetadata, attributeIds, true, timer).thenComposeAsync(attributes -> {
                    // Final attempt - now that we should have the attributes cached.
                    return addOperation(operation, timer.getRemaining());
                }, this.executor);
            }
        }

        // Anything else is non-retryable; rethrow.
        return Futures.failedFuture(ex);
    });
}
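The retry hinges on Futures.exceptionallyCompose which, judging by its use here, runs the handler only when the first future fails and passes successful results through untouched. A rough, assumption-laden sketch of that pattern using only plain CompletableFuture (not Pravega's implementation):

// Rough, hypothetical CompletableFuture-only equivalent of the Futures.exceptionallyCompose(...) pattern used above.
// Function is java.util.function.Function.
static <T> CompletableFuture<T> exceptionallyComposeSketch(CompletableFuture<T> future,
                                                           Function<Throwable, CompletableFuture<T>> handler) {
    // handle() delivers the raw (possibly CompletionException-wrapped) throwable, which is why the handler
    // in the snippet above starts with Exceptions.unwrap(ex).
    return future
            .handle((value, ex) -> ex == null ? CompletableFuture.completedFuture(value) : handler.apply(ex))
            .thenCompose(Function.identity());
}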
use of io.pravega.segmentstore.server.SegmentMetadata in project pravega by pravega.
the class StreamSegmentContainer method mergeStreamSegment.
private CompletableFuture<MergeStreamSegmentResult> mergeStreamSegment(long targetSegmentId, long sourceSegmentId, AttributeUpdateCollection attributeUpdates, TimeoutTimer timer) {
    // Get a reference to the source segment's metadata now, before the merge. It may not be accessible afterwards.
    SegmentMetadata sourceMetadata = this.metadata.getStreamSegmentMetadata(sourceSegmentId);
    CompletableFuture<Void> sealResult = trySealStreamSegment(sourceMetadata, timer.getRemaining());
    if (sourceMetadata.getLength() == 0) {
        // The source appears empty, so we may be able to delete it instead of merging it; however, it may still have outstanding
        // writes in the pipeline. As such, we cannot pipeline the two operations, and must wait for the seal to finish first.
        return sealResult.thenComposeAsync(v -> {
            // The seal has completed, so the metadata now reflects all operations up
            // to and including the seal, so if there were any writes outstanding before, they should now be reflected in it.
            if (sourceMetadata.getLength() == 0) {
                // Source is still empty after sealing - OK to delete.
                log.debug("{}: Updating attributes (if any) and deleting empty source segment instead of merging {}.", this.traceObjectId, sourceMetadata.getName());
                // Execute the attribute update on the target segment only if needed.
                Supplier<CompletableFuture<Void>> updateAttributesIfNeeded = () -> attributeUpdates == null
                        ? CompletableFuture.completedFuture(null)
                        : updateAttributesForSegment(targetSegmentId, attributeUpdates, timer.getRemaining());
                return updateAttributesIfNeeded.get()
                        .thenCompose(v2 -> deleteStreamSegment(sourceMetadata.getName(), timer.getRemaining())
                                .thenApply(v3 -> new MergeStreamSegmentResult(
                                        this.metadata.getStreamSegmentMetadata(targetSegmentId).getLength(),
                                        sourceMetadata.getLength(), sourceMetadata.getAttributes())));
            } else {
                // Source now has some data - we must merge the two.
                MergeSegmentOperation operation = new MergeSegmentOperation(targetSegmentId, sourceSegmentId, attributeUpdates);
                return processAttributeUpdaterOperation(operation, timer)
                        .thenApply(v2 -> new MergeStreamSegmentResult(operation.getStreamSegmentOffset() + operation.getLength(),
                                operation.getLength(), sourceMetadata.getAttributes()));
            }
        }, this.executor);
    } else {
        // Source is not empty, so we cannot delete. Make use of the DurableLog's pipelining abilities by queueing up
        // the Merge right after the Seal.
        MergeSegmentOperation operation = new MergeSegmentOperation(targetSegmentId, sourceSegmentId, attributeUpdates);
        return CompletableFuture.allOf(sealResult, processAttributeUpdaterOperation(operation, timer))
                .thenApply(v2 -> new MergeStreamSegmentResult(operation.getStreamSegmentOffset() + operation.getLength(),
                        operation.getLength(), sourceMetadata.getAttributes()));
    }
}
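The two branches differ mainly in how they compose the Seal with the follow-up work: the empty-source path must observe the seal's outcome before deciding between delete and merge, while the non-empty path can queue the Merge immediately behind the Seal and simply wait for both. A minimal, hypothetical CompletableFuture illustration of the two styles (placeholder Runnables stand in for the real operations):

// Hypothetical placeholders for the real operations.
CompletableFuture<Void> seal = CompletableFuture.runAsync(() -> { /* queue the Seal */ });

// Sequential composition: the next step runs only after the seal completes and its outcome can be inspected.
CompletableFuture<Void> sequential = seal.thenCompose(v ->
        CompletableFuture.runAsync(() -> { /* decide: delete the empty source, or merge */ }));

// Pipelined composition: both operations are queued up front; we only wait for both to finish.
CompletableFuture<Void> merge = CompletableFuture.runAsync(() -> { /* queue the Merge */ });
CompletableFuture<Void> pipelined = CompletableFuture.allOf(seal, merge);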