use of io.pravega.segmentstore.server.logs.operations.AttributeUpdaterOperation in project pravega by pravega.
the class AttributeAggregator method add.
/**
* Adds the given SegmentOperation to the Aggregator.
*
* @param operation the Operation to add.
* @throws DataCorruptionException If the validation of the given Operation indicates a possible data corruption in
* the code (offset gaps, out-of-order operations, etc.)
* @throws IllegalArgumentException If the validation of the given Operation indicates a possible non-corrupting bug
* in the code.
*/
@Override
public void add(SegmentOperation operation) throws DataCorruptionException {
    Exceptions.checkNotClosed(isClosed(), this);
    Preconditions.checkArgument(operation.getStreamSegmentId() == this.metadata.getId(),
            "Operation '%s' refers to a different Segment than this one (%s).", operation, this.metadata.getId());
    if (isSegmentDeleted()) {
        return;
    }

    boolean processed = false;
    if (operation instanceof StreamSegmentSealOperation) {
        this.state.seal();
        processed = true;
    } else if (operation instanceof AttributeUpdaterOperation) {
        AttributeUpdaterOperation op = (AttributeUpdaterOperation) operation;
        if (this.state.hasSeal()) {
            if (op.isInternal() && op.hasOnlyCoreAttributes()) {
                log.debug("{}: Ignored internal operation on sealed segment {}.", this.traceObjectId, operation);
                return;
            } else {
                throw new DataCorruptionException(String.format("Illegal operation for a sealed Segment; received '%s'.", operation));
            }
        }
        processed = this.state.include(op);
    }

    if (processed) {
        log.debug("{}: Add {}; OpCount={}.", this.traceObjectId, operation, this.state.size());
    }
}
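For orientation, here is a minimal sketch of how a writer-side driver loop might feed operations into the aggregator. The loop structure, the pendingOperations source, and the timeout value are assumptions made for illustration; only add(), mustFlush(), flush() and WriterFlushResult are taken from the snippets on this page.

// Hypothetical driver loop (illustration only; not part of the AttributeAggregator API).
Duration timeout = Duration.ofSeconds(10);                // assumed timeout value
for (SegmentOperation op : pendingOperations) {           // pendingOperations: assumed input source
    aggregator.add(op);                                   // may throw DataCorruptionException
    if (aggregator.mustFlush()) {
        // Persist the accumulated attribute updates before accepting more work.
        WriterFlushResult result = aggregator.flush(timeout).join();
        log.debug("Flushed {} attributes.", result.getFlushedAttributes());
    }
}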
use of io.pravega.segmentstore.server.logs.operations.AttributeUpdaterOperation in project pravega by pravega.
the class StreamSegmentContainer method processAttributeUpdaterOperation.
/**
* Processes the given AttributeUpdaterOperation with exactly one retry in case it was rejected because an attribute
* value was missing from the in-memory cache.
*
* @param operation The Operation to process.
* @param timer Timer for the operation.
* @param <T> Type of the operation.
* @return A CompletableFuture that, when completed normally, will indicate that the Operation has been successfully
* processed. If it failed, it will be completed with an appropriate exception.
*/
private <T extends Operation & AttributeUpdaterOperation> CompletableFuture<Void> processAttributeUpdaterOperation(T operation, TimeoutTimer timer) {
    Collection<AttributeUpdate> updates = operation.getAttributeUpdates();
    if (updates == null || updates.isEmpty()) {
        // No need for extra complicated handling.
        return addOperation(operation, timer.getRemaining());
    }

    return Futures.exceptionallyCompose(addOperation(operation, timer.getRemaining()), ex -> {
        // We only retry BadAttributeUpdateExceptions that have the PreviousValueMissing flag set.
        ex = Exceptions.unwrap(ex);
        if (ex instanceof BadAttributeUpdateException && ((BadAttributeUpdateException) ex).isPreviousValueMissing()) {
            // Get the missing attributes and load them into the cache, then retry the operation, exactly once.
            SegmentMetadata segmentMetadata = this.metadata.getStreamSegmentMetadata(operation.getStreamSegmentId());
            Collection<AttributeId> attributeIds = updates.stream()
                    .map(AttributeUpdate::getAttributeId)
                    .filter(id -> !Attributes.isCoreAttribute(id))
                    .collect(Collectors.toList());
            if (!attributeIds.isEmpty()) {
                // Retrying only makes sense if an extended (non-core) attribute was missing; core attributes are always in memory.
                return getAndCacheAttributes(segmentMetadata, attributeIds, true, timer).thenComposeAsync(attributes -> {
                    // Final attempt - now that we should have the attributes cached.
                    return addOperation(operation, timer.getRemaining());
                }, this.executor);
            }
        }

        // Anything else is non-retryable; rethrow.
        return Futures.failedFuture(ex);
    });
}
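The retry-once pattern used here can also be expressed against plain java.util.concurrent primitives. The sketch below is a generic illustration under that assumption; the class and the names retryOnceOn, shouldRetry and beforeRetry are invented for this example and do not exist in Pravega.

import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.function.Predicate;
import java.util.function.Supplier;

final class RetryOnce {
    // Runs the async action; if it fails with an exception matching shouldRetry, runs beforeRetry
    // (e.g. to warm a cache, as getAndCacheAttributes does above) and then retries exactly once.
    static <T> CompletableFuture<T> retryOnceOn(Supplier<CompletableFuture<T>> action,
                                                Predicate<Throwable> shouldRetry,
                                                Supplier<CompletableFuture<Void>> beforeRetry) {
        return action.get().handle((result, ex) -> {
            if (ex == null) {
                return CompletableFuture.completedFuture(result);
            }
            Throwable cause = (ex instanceof CompletionException && ex.getCause() != null) ? ex.getCause() : ex;
            if (shouldRetry.test(cause)) {
                // Second and final attempt, after the caller-supplied preparation step.
                return beforeRetry.get().thenCompose(v -> action.get());
            }
            CompletableFuture<T> failed = new CompletableFuture<>();
            failed.completeExceptionally(cause);
            return failed;
        }).thenCompose(f -> f);
    }
}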
use of io.pravega.segmentstore.server.logs.operations.AttributeUpdaterOperation in project pravega by pravega.
the class AttributeAggregatorTests method testRecovery.
/**
* Tests the ability to resume operations after a recovery.
*/
@Test
public void testRecovery() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    final int attributesPerUpdate = Math.max(1, config.getFlushAttributesThreshold() / 5);
    final int updateCount = config.getFlushAttributesThreshold() * 10;
    @Cleanup
    TestContext context = new TestContext(config);

    // Generate some data.
    val operations = new ArrayList<AttributeUpdaterOperation>();
    for (int i = 0; i < updateCount; i++) {
        // Add another operation.
        AttributeUpdaterOperation op = i % 2 == 0
                ? generateAppendAndUpdateMetadata(attributesPerUpdate, context)
                : generateUpdateAttributesAndUpdateMetadata(attributesPerUpdate, context);
        operations.add(op);
    }

    // Include all operations with indices less than or equal to recoveryId and observe the results.
    for (int recoveryId = 0; recoveryId < operations.size(); recoveryId++) {
        long lastPersistedSeqNo = context.segmentMetadata.getAttributes().getOrDefault(Attributes.ATTRIBUTE_SEGMENT_PERSIST_SEQ_NO, Operation.NO_SEQUENCE_NUMBER);
        val outstandingAttributes = new HashSet<AttributeId>();
        val firstOutstandingSeqNo = new AtomicLong(Operation.NO_SEQUENCE_NUMBER);
        val lastOutstandingSeqNo = new AtomicLong(Operation.NO_SEQUENCE_NUMBER);
        @Cleanup
        val aggregator = context.createAggregator();
        val expectedAttributes = new HashMap<AttributeId, Long>();
        for (int i = 0; i <= recoveryId; i++) {
            AttributeUpdaterOperation op = operations.get(i);

            // Collect the latest values from this update.
            op.getAttributeUpdates().stream()
              .filter(au -> !Attributes.isCoreAttribute(au.getAttributeId()))
              .forEach(au -> expectedAttributes.put(au.getAttributeId(), au.getValue()));
            aggregator.add(op);

            // We only expect to process an op if its SeqNo is beyond the last one we committed.
            boolean expectedToProcess = op.getSequenceNumber() > lastPersistedSeqNo;
            if (expectedToProcess) {
                addExtendedAttributes(op, outstandingAttributes);
                firstOutstandingSeqNo.compareAndSet(Operation.NO_SEQUENCE_NUMBER, op.getSequenceNumber());
                lastOutstandingSeqNo.set(op.getSequenceNumber());
            }

            Assert.assertEquals("Unexpected LUSN.", firstOutstandingSeqNo.get(), aggregator.getLowestUncommittedSequenceNumber());
            boolean expectFlush = outstandingAttributes.size() >= config.getFlushAttributesThreshold();
            Assert.assertEquals("Unexpected value returned by mustFlush() (count threshold).", expectFlush, aggregator.mustFlush());
            if (expectFlush) {
                // Call flush() and inspect the result.
                WriterFlushResult flushResult = aggregator.flush(TIMEOUT).join();
                Assert.assertEquals("Not all attributes were flushed (count threshold).", outstandingAttributes.size(), flushResult.getFlushedAttributes());

                // We want to verify just those attributes that we flushed, not all of them (not all may be in yet).
                AssertExtensions.assertMapEquals("Unexpected attributes stored in AttributeIndex.", expectedAttributes, context.dataSource.getPersistedAttributes(SEGMENT_ID));
                checkAutoAttributesEventual(lastOutstandingSeqNo.get(), context);
                outstandingAttributes.clear();
                firstOutstandingSeqNo.set(Operation.NO_SEQUENCE_NUMBER);
                lastOutstandingSeqNo.set(Operation.NO_SEQUENCE_NUMBER);
            }
        }

        // We have reached the end. Flush the rest and perform a full check.
        if (recoveryId == operations.size() - 1) {
            aggregator.add(generateSealAndUpdateMetadata(context));
            aggregator.flush(TIMEOUT).join();
            checkAttributes(context);
            checkAutoAttributesEventual(lastOutstandingSeqNo.get(), context);
        }
    }
}
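The recovery behavior exercised above comes down to one gate: a re-added operation is considered outstanding again only if its sequence number exceeds the ATTRIBUTE_SEGMENT_PERSIST_SEQ_NO auto-attribute recorded by the last successful flush. A minimal sketch of that check, with the helper name needsReprocessing assumed purely for illustration:

// Hypothetical helper mirroring the gating logic used in the test above.
static boolean needsReprocessing(AttributeUpdaterOperation op, SegmentMetadata segmentMetadata) {
    long lastPersistedSeqNo = segmentMetadata.getAttributes()
            .getOrDefault(Attributes.ATTRIBUTE_SEGMENT_PERSIST_SEQ_NO, Operation.NO_SEQUENCE_NUMBER);
    return op.getSequenceNumber() > lastPersistedSeqNo;
}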
use of io.pravega.segmentstore.server.logs.operations.AttributeUpdaterOperation in project pravega by pravega.
the class AttributeAggregatorTests method testFlushWithGenericErrors.
/**
* Tests {@link AttributeAggregator#flush} in the presence of generic errors.
*/
@Test
public void testFlushWithGenericErrors() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    @Cleanup
    TestContext context = new TestContext(config);

    // Add a single operation, which alone should trigger the flush.
    AttributeUpdaterOperation op = generateUpdateAttributesAndUpdateMetadata(config.getFlushAttributesThreshold(), context);
    context.aggregator.add(op);
    Assert.assertTrue("Unexpected result from mustFlush().", context.aggregator.mustFlush());

    // Cause the attribute update to fail, and validate that the error is bubbled up.
    context.dataSource.setPersistAttributesErrorInjector(new ErrorInjector<>(i -> true, IntentionalException::new));
    AssertExtensions.assertSuppliedFutureThrows("Expected flush() to have failed.",
            () -> context.aggregator.flush(TIMEOUT),
            ex -> ex instanceof IntentionalException);
    Assert.assertTrue("Unexpected result from mustFlush() after failed attempt.", context.aggregator.mustFlush());
    checkAutoAttributes(Operation.NO_SEQUENCE_NUMBER, context);

    // Now try again, without errors.
    context.dataSource.setPersistAttributesErrorInjector(null);
    val result = context.aggregator.flush(TIMEOUT).join();
    Assert.assertEquals("Unexpected number of attributes flushed",
            op.getAttributeUpdates().size() - 1, // Subtract 1 for core attributes.
            result.getFlushedAttributes());
    checkAttributes(context);
    checkAutoAttributesEventual(op.getSequenceNumber(), context);
}
use of io.pravega.segmentstore.server.logs.operations.AttributeUpdaterOperation in project pravega by pravega.
the class AttributeAggregatorTests method testRecoverySealedSegment.
/**
* Tests the ability to resume operations after a recovery on a sealed segment.
*/
@Test
public void testRecoverySealedSegment() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    final int attributesPerUpdate = 1;
    final int updateCount = config.getFlushAttributesThreshold() - 1;
    @Cleanup
    TestContext context = new TestContext(config);
    val outstandingAttributes = new HashSet<AttributeId>();
    val firstOutstandingSeqNo = new AtomicLong(Operation.NO_SEQUENCE_NUMBER);
    val lastOutstandingSeqNo = new AtomicLong(Operation.NO_SEQUENCE_NUMBER);

    // Generate a few operations.
    val operations = new ArrayList<SegmentOperation>();
    for (int i = 0; i < updateCount; i++) {
        // Add another operation.
        AttributeUpdaterOperation op = i % 2 == 0
                ? generateAppendAndUpdateMetadata(attributesPerUpdate, context)
                : generateUpdateAttributesAndUpdateMetadata(attributesPerUpdate, context);
        operations.add(op);
        addExtendedAttributes(op, outstandingAttributes);
        firstOutstandingSeqNo.compareAndSet(Operation.NO_SEQUENCE_NUMBER, op.getSequenceNumber());
        lastOutstandingSeqNo.set(op.getSequenceNumber());
    }
    operations.add(generateSealAndUpdateMetadata(context));

    // Add them to the first aggregator, then flush them.
    for (val op : operations) {
        context.aggregator.add(op);
    }
    val flushResult = context.aggregator.flush(TIMEOUT).join();
    Assert.assertEquals("Not all attributes were flushed.", outstandingAttributes.size(), flushResult.getFlushedAttributes());
    checkAttributes(context);
    checkAutoAttributesEventual(lastOutstandingSeqNo.get(), context);

    // Create a second (recovered) aggregator and re-add the operations.
    @Cleanup
    val aggregator2 = context.createAggregator();
    for (val op : operations) {
        aggregator2.add(op);
    }
    Assert.assertEquals("Not expecting any operation outstanding in the second aggregator.", Operation.NO_SEQUENCE_NUMBER, aggregator2.getLowestUncommittedSequenceNumber());
    Assert.assertTrue("Expected a flush of the second aggregator.", aggregator2.mustFlush());
    val flushResult2 = aggregator2.flush(TIMEOUT).join();
    Assert.assertEquals("Not expecting any attributes to be flushed.", 0, flushResult2.getFlushedAttributes());
    checkAttributes(context);
    checkAutoAttributesEventual(lastOutstandingSeqNo.get(), context);

    // Create a third (recovered) aggregator, but clear out the auto-attributes.
    context.segmentMetadata.updateAttributes(Collections.singletonMap(Attributes.ATTRIBUTE_SEGMENT_PERSIST_SEQ_NO, Attributes.NULL_ATTRIBUTE_VALUE));
    @Cleanup
    val aggregator3 = context.createAggregator();
    for (val op : operations) {
        aggregator3.add(op);
    }
    Assert.assertEquals("Unexpected LUSN for the third aggregator.", firstOutstandingSeqNo.get(), aggregator3.getLowestUncommittedSequenceNumber());
    Assert.assertTrue("Expected a flush of the third aggregator.", aggregator3.mustFlush());
    val flushResult3 = aggregator3.flush(TIMEOUT).join();
    Assert.assertEquals("Not expecting any attributes to be flushed.", 0, flushResult3.getFlushedAttributes());
    checkAttributes(context);

    // Segment is sealed, so it couldn't have updated this value.
    checkAutoAttributesEventual(Operation.NO_SEQUENCE_NUMBER, context);
}