Use of io.pravega.segmentstore.server.logs.operations.AttributeUpdaterOperation in project pravega by pravega.
From class AttributeAggregatorTests, method testFlush.
/**
 * Tests {@link AttributeAggregator#flush}.
 */
@Test
public void testFlush() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    final int attributesPerUpdate = Math.max(1, config.getFlushAttributesThreshold() / 5);
    final int updateCount = config.getFlushAttributesThreshold() * 10;
    @Cleanup
    TestContext context = new TestContext(config);
    val outstandingAttributes = new HashSet<AttributeId>();
    val firstOutstandingSeqNo = new AtomicLong(Operation.NO_SEQUENCE_NUMBER);
    val lastOutstandingSeqNo = new AtomicLong(Operation.NO_SEQUENCE_NUMBER);

    // Part 0: Empty operations.
    context.aggregator.add(generateUpdateAttributesAndUpdateMetadata(0, context));
    Assert.assertFalse("Unexpected value returned by mustFlush() after empty operation.",
            context.aggregator.mustFlush());
    Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() after empty operation.",
            firstOutstandingSeqNo.get(), context.aggregator.getLowestUncommittedSequenceNumber());

    // Part 1: flush triggered by accumulated counts.
    for (int i = 0; i < updateCount; i++) {
        // Add another operation.
        AttributeUpdaterOperation op = i % 2 == 0
                ? generateAppendAndUpdateMetadata(attributesPerUpdate, context)
                : generateUpdateAttributesAndUpdateMetadata(attributesPerUpdate, context);
        addExtendedAttributes(op, outstandingAttributes);
        firstOutstandingSeqNo.compareAndSet(Operation.NO_SEQUENCE_NUMBER, op.getSequenceNumber());
        lastOutstandingSeqNo.set(op.getSequenceNumber());
        context.aggregator.add(op);
        boolean expectFlush = outstandingAttributes.size() >= config.getFlushAttributesThreshold();
        Assert.assertEquals("Unexpected value returned by mustFlush() (count threshold).",
                expectFlush, context.aggregator.mustFlush());
        Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() before flush (count threshold).",
                firstOutstandingSeqNo.get(), context.aggregator.getLowestUncommittedSequenceNumber());

        // Call flush() and inspect the result.
        WriterFlushResult flushResult = context.aggregator.flush(TIMEOUT).join();
        if (expectFlush) {
            Assert.assertFalse("Unexpected value returned by mustFlush() after flush (count threshold).",
                    context.aggregator.mustFlush());
            Assert.assertEquals("Not all attributes were flushed (count threshold).",
                    outstandingAttributes.size(), flushResult.getFlushedAttributes());
            checkAttributes(context);
            checkAutoAttributesEventual(lastOutstandingSeqNo.get(), context);
            outstandingAttributes.clear();
            firstOutstandingSeqNo.set(Operation.NO_SEQUENCE_NUMBER);
            lastOutstandingSeqNo.set(Operation.NO_SEQUENCE_NUMBER);
            assertEventuallyEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() after flush (count threshold).",
                    firstOutstandingSeqNo.get(), context.aggregator::getLowestUncommittedSequenceNumber);
        } else {
            Assert.assertEquals(String.format("Not expecting a flush. OutstandingCount=%d, Threshold=%d",
                    outstandingAttributes.size(), config.getFlushAttributesThreshold()),
                    0, flushResult.getFlushedAttributes());
        }
    }

    // Part 2: flush triggered by time.
    for (int i = 0; i < updateCount; i++) {
        // Add another operation.
        AttributeUpdaterOperation op = i % 2 == 0
                ? generateAppendAndUpdateMetadata(attributesPerUpdate, context)
                : generateUpdateAttributesAndUpdateMetadata(attributesPerUpdate, context);
        addExtendedAttributes(op, outstandingAttributes);
        firstOutstandingSeqNo.compareAndSet(Operation.NO_SEQUENCE_NUMBER, op.getSequenceNumber());
        lastOutstandingSeqNo.set(op.getSequenceNumber());
        context.aggregator.add(op);

        // Call flush() and inspect the result.
        Assert.assertFalse("Unexpected value returned by mustFlush() before time elapsed (time threshold).",
                context.aggregator.mustFlush());
        Assert.assertEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() before flush (time threshold).",
                firstOutstandingSeqNo.get(), context.aggregator.getLowestUncommittedSequenceNumber());
        val flushResult = forceTimeFlush(context);

        // We are always expecting a flush.
        Assert.assertEquals("Not all attributes were flushed (time threshold).",
                outstandingAttributes.size(), flushResult.getFlushedAttributes());
        checkAutoAttributesEventual(lastOutstandingSeqNo.get(), context);
        outstandingAttributes.clear();
        firstOutstandingSeqNo.set(Operation.NO_SEQUENCE_NUMBER);
        lastOutstandingSeqNo.set(Operation.NO_SEQUENCE_NUMBER);
        assertEventuallyEquals("Unexpected value returned by getLowestUncommittedSequenceNumber() after flush (time threshold).",
                firstOutstandingSeqNo.get(), context.aggregator::getLowestUncommittedSequenceNumber);
        Assert.assertEquals("Not expecting any merged bytes in this test.", 0, flushResult.getMergedBytes());
        checkAttributes(context);
    }
}
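
The forceTimeFlush helper used in Part 2 is not included in this excerpt. A minimal sketch of what such a helper might look like, assuming the TestContext exposes a controllable clock (the increaseTime method below is a placeholder, not the project's actual API) and that the configured flush-time threshold is available via WriterConfig.getFlushThresholdTime():

// Hypothetical sketch only: advance the test clock past the flush-time threshold so that
// mustFlush() trips on elapsed time rather than on the accumulated attribute count, then flush.
private WriterFlushResult forceTimeFlush(TestContext context) {
    // "increaseTime" is an assumed TestContext method; the real helper may differ.
    context.increaseTime(DEFAULT_CONFIG.getFlushThresholdTime().toMillis() + 1);
    Assert.assertTrue("Expected mustFlush() once the time threshold has elapsed.", context.aggregator.mustFlush());
    return context.aggregator.flush(TIMEOUT).join();
}
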
Use of io.pravega.segmentstore.server.logs.operations.AttributeUpdaterOperation in project pravega by pravega.
From class AttributeAggregatorTests, method testSeal.
/**
 * Tests the ability to seal the Attribute Index.
 */
@Test
public void testSeal() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    final int attributesPerUpdate = 1;
    final int updateCount = config.getFlushAttributesThreshold() - 1;
    @Cleanup
    TestContext context = new TestContext(config);
    val outstandingAttributes = new HashSet<AttributeId>();
    val firstOutstandingSeqNo = new AtomicLong(Operation.NO_SEQUENCE_NUMBER);
    val lastOutstandingSeqNo = new AtomicLong(Operation.NO_SEQUENCE_NUMBER);

    // Add a few operations.
    for (int i = 0; i < updateCount; i++) {
        // Add another operation.
        AttributeUpdaterOperation op = i % 2 == 0
                ? generateAppendAndUpdateMetadata(attributesPerUpdate, context)
                : generateUpdateAttributesAndUpdateMetadata(attributesPerUpdate, context);
        addExtendedAttributes(op, outstandingAttributes);
        firstOutstandingSeqNo.compareAndSet(Operation.NO_SEQUENCE_NUMBER, op.getSequenceNumber());
        lastOutstandingSeqNo.set(op.getSequenceNumber());
        context.aggregator.add(op);
    }
    Assert.assertFalse("Not expecting a flush yet.", context.aggregator.mustFlush());
    context.aggregator.add(generateSealAndUpdateMetadata(context));
    Assert.assertTrue("Expecting a flush after a seal operation.", context.aggregator.mustFlush());
    val flushResult = context.aggregator.flush(TIMEOUT).join();
    Assert.assertEquals("Not all attributes were flushed.", outstandingAttributes.size(), flushResult.getFlushedAttributes());
    Assert.assertFalse("Not expecting a flush required after flushing everything.", context.aggregator.mustFlush());
    checkAttributes(context);
    checkAutoAttributesEventual(lastOutstandingSeqNo.get(), context);
    AssertExtensions.assertSuppliedFutureThrows("Expected the attribute index to have been sealed.",
            () -> context.dataSource.persistAttributes(SEGMENT_ID, Collections.singletonMap(AttributeId.randomUUID(), 1L), TIMEOUT),
            ex -> ex instanceof StreamSegmentSealedException);
}
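
The generateAppendAndUpdateMetadata and generateUpdateAttributesAndUpdateMetadata helpers referenced above are not part of this excerpt. As a rough illustration of how an AttributeUpdaterOperation carrying extended attributes could be built, the sketch below constructs an UpdateAttributesOperation directly; it assumes the AttributeUpdateCollection and AttributeUpdate constructors of recent Pravega versions and omits the segment-metadata bookkeeping that the real helpers perform:

// Hypothetical sketch: build an UpdateAttributesOperation with "count" extended attributes.
// segmentId and nextSeqNo are caller-supplied; the real test helpers also update the
// segment's metadata, which is omitted here.
private AttributeUpdaterOperation generateUpdateAttributes(int count, long segmentId, AtomicLong nextSeqNo) {
    AttributeUpdateCollection updates = new AttributeUpdateCollection();
    for (int i = 0; i < count; i++) {
        // Extended (non-core) attributes use randomly generated ids.
        updates.add(new AttributeUpdate(AttributeId.randomUUID(), AttributeUpdateType.Replace, i));
    }
    UpdateAttributesOperation op = new UpdateAttributesOperation(segmentId, updates);
    op.setSequenceNumber(nextSeqNo.incrementAndGet());
    return op;
}
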
Use of io.pravega.segmentstore.server.logs.operations.AttributeUpdaterOperation in project pravega by pravega.
From class AttributeAggregatorTests, method testRecovery.
/**
 * Tests the ability to resume operations after a recovery.
 */
@Test
public void testRecovery() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    final int attributesPerUpdate = Math.max(1, config.getFlushAttributesThreshold() / 5);
    final int updateCount = config.getFlushAttributesThreshold() * 10;
    @Cleanup
    TestContext context = new TestContext(config);

    // Generate some data.
    val operations = new ArrayList<AttributeUpdaterOperation>();
    for (int i = 0; i < updateCount; i++) {
        // Add another operation.
        AttributeUpdaterOperation op = i % 2 == 0
                ? generateAppendAndUpdateMetadata(attributesPerUpdate, context)
                : generateUpdateAttributesAndUpdateMetadata(attributesPerUpdate, context);
        operations.add(op);
    }

    // Simulate a recovery after each operation: for every recoveryId, replay all operations with
    // indices less than or equal to recoveryId into a fresh aggregator and observe the results.
    for (int recoveryId = 0; recoveryId < operations.size(); recoveryId++) {
        long lastPersistedSeqNo = context.segmentMetadata.getAttributes()
                .getOrDefault(Attributes.ATTRIBUTE_SEGMENT_PERSIST_SEQ_NO, Operation.NO_SEQUENCE_NUMBER);
        val outstandingAttributes = new HashSet<AttributeId>();
        val firstOutstandingSeqNo = new AtomicLong(Operation.NO_SEQUENCE_NUMBER);
        val lastOutstandingSeqNo = new AtomicLong(Operation.NO_SEQUENCE_NUMBER);
        @Cleanup
        val aggregator = context.createAggregator();
        val expectedAttributes = new HashMap<AttributeId, Long>();
        for (int i = 0; i <= recoveryId; i++) {
            AttributeUpdaterOperation op = operations.get(i);

            // Collect the latest values from this update.
            op.getAttributeUpdates().stream()
                    .filter(au -> !Attributes.isCoreAttribute(au.getAttributeId()))
                    .forEach(au -> expectedAttributes.put(au.getAttributeId(), au.getValue()));
            aggregator.add(op);

            // We only expect to process an op if its SeqNo is beyond the last one we committed.
            boolean expectedToProcess = op.getSequenceNumber() > lastPersistedSeqNo;
            if (expectedToProcess) {
                addExtendedAttributes(op, outstandingAttributes);
                firstOutstandingSeqNo.compareAndSet(Operation.NO_SEQUENCE_NUMBER, op.getSequenceNumber());
                lastOutstandingSeqNo.set(op.getSequenceNumber());
            }
            Assert.assertEquals("Unexpected LUSN.", firstOutstandingSeqNo.get(), aggregator.getLowestUncommittedSequenceNumber());
            boolean expectFlush = outstandingAttributes.size() >= config.getFlushAttributesThreshold();
            Assert.assertEquals("Unexpected value returned by mustFlush() (count threshold).", expectFlush, aggregator.mustFlush());
            if (expectFlush) {
                // Call flush() and inspect the result.
                WriterFlushResult flushResult = aggregator.flush(TIMEOUT).join();
                Assert.assertEquals("Not all attributes were flushed (count threshold).",
                        outstandingAttributes.size(), flushResult.getFlushedAttributes());

                // We want to verify just those attributes that we flushed, not all of them (not all may be in yet).
                AssertExtensions.assertMapEquals("Unexpected attributes stored in AttributeIndex.",
                        expectedAttributes, context.dataSource.getPersistedAttributes(SEGMENT_ID));
                checkAutoAttributesEventual(lastOutstandingSeqNo.get(), context);
                outstandingAttributes.clear();
                firstOutstandingSeqNo.set(Operation.NO_SEQUENCE_NUMBER);
                lastOutstandingSeqNo.set(Operation.NO_SEQUENCE_NUMBER);
            }
        }

        // We have reached the end. Flush the rest and perform a full check.
        if (recoveryId == operations.size() - 1) {
            aggregator.add(generateSealAndUpdateMetadata(context));
            aggregator.flush(TIMEOUT).join();
            checkAttributes(context);
            checkAutoAttributesEventual(lastOutstandingSeqNo.get(), context);
        }
    }
}
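
The addExtendedAttributes helper used throughout these tests is not shown either. Judging by the core-attribute filter applied in testRecovery above, it likely does something along these lines (a sketch under that assumption, not the project's actual implementation):

// Hypothetical sketch: record the ids of the extended (non-core) attributes touched by an
// operation, mirroring the Attributes.isCoreAttribute filter used in testRecovery.
private void addExtendedAttributes(AttributeUpdaterOperation op, Set<AttributeId> target) {
    op.getAttributeUpdates().stream()
            .map(AttributeUpdate::getAttributeId)
            .filter(id -> !Attributes.isCoreAttribute(id))
            .forEach(target::add);
}
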