use of io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation in project pravega by pravega.
the class SegmentAggregatorTests method testFlushEmptyAppend.
/**
 * Tests the behavior of flush() with empty appends.
 */
@Test
public void testFlushEmptyAppend() throws Exception {
    final WriterConfig config = DEFAULT_CONFIG;
    val rnd = new Random(0);
    final byte[] initialBytes = new byte[config.getMaxFlushSizeBytes()];
    final byte[] mergedBytes = new byte[100];
    final int segmentLength = initialBytes.length + mergedBytes.length;
    rnd.nextBytes(initialBytes);
    rnd.nextBytes(mergedBytes);
    @Cleanup
    TestContext context = new TestContext(config);

    // Create a segment in Storage.
    context.storage.create(SEGMENT_NAME, TIMEOUT).join();
    context.segmentAggregator.initialize(TIMEOUT).join();
    val metadata = (UpdateableSegmentMetadata) context.segmentAggregator.getMetadata();
    metadata.setLength(segmentLength);

    // First append fills up the max limit for the AggregatedAppend buffer.
    val append1 = new StreamSegmentAppendOperation(SEGMENT_ID, new ByteArraySegment(initialBytes), null);
    append1.setStreamSegmentOffset(0);
    append1.setSequenceNumber(context.containerMetadata.nextOperationSequenceNumber());
    context.dataSource.recordAppend(append1);
    context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(append1));

    // Second append is empty.
    val emptyAppend = new StreamSegmentAppendOperation(SEGMENT_ID, BufferView.empty(), null);
    emptyAppend.setStreamSegmentOffset(initialBytes.length);
    emptyAppend.setSequenceNumber(context.containerMetadata.nextOperationSequenceNumber());
    context.dataSource.recordAppend(emptyAppend);
    context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(emptyAppend));

    // Create a source segment.
    val sourceAggregator = context.transactionAggregators[0];
    val sourceMetadata = (UpdateableSegmentMetadata) sourceAggregator.getMetadata();
    sourceMetadata.setLength(mergedBytes.length);
    sourceMetadata.setStorageLength(mergedBytes.length);
    context.storage.create(sourceMetadata.getName(), TIMEOUT).join();
    context.storage.openWrite(sourceMetadata.getName())
            .thenCompose(handle -> context.storage.write(handle, 0, new ByteArrayInputStream(mergedBytes), mergedBytes.length, TIMEOUT)
                    .thenCompose(v -> context.storage.seal(handle, TIMEOUT)))
            .join();

    // And include it via a Merge Op.
    sourceMetadata.markSealed();
    sourceMetadata.markSealedInStorage();
    sourceMetadata.markMerged();
    val mergeOp = new MergeSegmentOperation(SEGMENT_ID, sourceMetadata.getId());
    mergeOp.setStreamSegmentOffset(initialBytes.length);
    mergeOp.setLength(sourceMetadata.getLength());
    mergeOp.setSequenceNumber(context.containerMetadata.nextOperationSequenceNumber());
    context.segmentAggregator.add(mergeOp);

    // Flush, and verify the result.
    val flushResult1 = context.segmentAggregator.flush(TIMEOUT).get(TIMEOUT.toMillis(), TimeUnit.MILLISECONDS);
    Assert.assertEquals("Unexpected number of bytes flushed", initialBytes.length, flushResult1.getFlushedBytes());
    Assert.assertEquals("Unexpected number of bytes merged", mergedBytes.length, flushResult1.getMergedBytes());
    byte[] expectedData = new byte[initialBytes.length + mergedBytes.length];
    System.arraycopy(initialBytes, 0, expectedData, 0, initialBytes.length);
    System.arraycopy(mergedBytes, 0, expectedData, initialBytes.length, mergedBytes.length);
    verifySegmentData(expectedData, context);
}
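The wrap-and-add sequence used twice above is the core pattern for feeding appends to the aggregator: build a StreamSegmentAppendOperation, assign its offset and sequence number, record the payload with the test data source before wrapping (so the aggregator can fetch the bytes from the cache at flush time), and only then hand the CachedStreamSegmentAppendOperation to the aggregator. A minimal sketch of just that step, assuming the same TestContext fixtures and constants (SEGMENT_ID, context.containerMetadata, context.dataSource, context.segmentAggregator) and a test method declared throws Exception; payload and knownOffset are illustrative placeholders:

    // Sketch only: payload contents and knownOffset are illustrative.
    byte[] payload = new byte[1024];
    long knownOffset = 0;
    val rawAppend = new StreamSegmentAppendOperation(SEGMENT_ID, new ByteArraySegment(payload), null);
    rawAppend.setStreamSegmentOffset(knownOffset);
    rawAppend.setSequenceNumber(context.containerMetadata.nextOperationSequenceNumber());
    // Record the payload with the data source so it can be retrieved at flush time.
    context.dataSource.recordAppend(rawAppend);
    context.segmentAggregator.add(new CachedStreamSegmentAppendOperation(rawAppend));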
use of io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation in project pravega by pravega.
the class SegmentAggregatorTests method generateAppendAndUpdateMetadata.
private StorageOperation generateAppendAndUpdateMetadata(long segmentId, byte[] data, TestContext context) {
    UpdateableSegmentMetadata segmentMetadata = context.containerMetadata.getStreamSegmentMetadata(segmentId);
    long offset = segmentMetadata.getLength();
    segmentMetadata.setLength(offset + data.length);
    StreamSegmentAppendOperation op = new StreamSegmentAppendOperation(segmentId, new ByteArraySegment(data), null);
    op.setStreamSegmentOffset(offset);
    op.setSequenceNumber(context.containerMetadata.nextOperationSequenceNumber());
    context.dataSource.recordAppend(op);
    return new CachedStreamSegmentAppendOperation(op);
}
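A hypothetical caller for this helper, following the flow the other tests in this class use; SEGMENT_ID, context, and TIMEOUT come from the test class, while the payload size is arbitrary:

    // Illustrative only: queue a generated append with the aggregator.
    byte[] data = new byte[100];
    new Random(0).nextBytes(data);
    StorageOperation appendOp = generateAppendAndUpdateMetadata(SEGMENT_ID, data, context);
    context.segmentAggregator.add(appendOp);
    // Whether a subsequent flush() picks this append up immediately depends on the WriterConfig thresholds.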
use of io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation in project pravega by pravega.
the class WriterTableProcessorTests method generateSimulatedAppend.
private CachedStreamSegmentAppendOperation generateSimulatedAppend(long offset, int length, TestContext context) {
    val op = new StreamSegmentAppendOperation(context.metadata.getId(), offset, new ByteArraySegment(new byte[length]), null);
    op.setSequenceNumber(context.nextSequenceNumber());
    return new CachedStreamSegmentAppendOperation(op);
}
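A hypothetical call site for this helper, mirroring how the processor is driven elsewhere in this class. The offset and length are illustrative; because the payload is zero-filled, the resulting operation is only useful for exercising queueing and offset bookkeeping, not for producing a parseable table entry:

    // Illustrative only.
    val simulatedAppend = generateSimulatedAppend(INITIAL_LAST_INDEXED_OFFSET, 100, context);
    context.processor.add(simulatedAppend);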
use of io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation in project pravega by pravega.
the class WriterTableProcessorTests method testReconcileTableIndexOffset.
/**
 * Tests the ability to reconcile the {@link TableAttributes#INDEX_OFFSET} value when that changes behind the scenes.
 */
@Test
public void testReconcileTableIndexOffset() throws Exception {
    @Cleanup
    val context = new TestContext();

    // Generate two TableEntries, write them to the segment and queue them into the processor.
    val e1 = TableEntry.unversioned(new ByteArraySegment("k1".getBytes()), new ByteArraySegment("v1".getBytes()));
    val e2 = TableEntry.unversioned(new ByteArraySegment("k2".getBytes()), new ByteArraySegment("v2".getBytes()));
    val append1 = generateRawAppend(e1, INITIAL_LAST_INDEXED_OFFSET, context);
    val append2 = generateRawAppend(e2, append1.getLastStreamSegmentOffset(), context);
    context.segmentMock.append(append1.getData(), null, TIMEOUT).join();
    context.segmentMock.append(append2.getData(), null, TIMEOUT).join();
    context.processor.add(new CachedStreamSegmentAppendOperation(append1));
    context.processor.add(new CachedStreamSegmentAppendOperation(append2));

    // 1. INDEX_OFFSET changes to smaller than first append.
    context.metadata.updateAttributes(Collections.singletonMap(TableAttributes.INDEX_OFFSET, INITIAL_LAST_INDEXED_OFFSET - 1));
    int attributeCountBefore = context.segmentMock.getAttributeCount();
    AssertExtensions.assertSuppliedFutureThrows("flush() worked when INDEX_OFFSET decreased.",
            () -> context.processor.flush(TIMEOUT),
            ex -> ex instanceof DataCorruptionException);
    int attributeCountAfter = context.segmentMock.getAttributeCount();
    Assert.assertEquals("flush() seems to have modified the index after failed attempt", attributeCountBefore, attributeCountAfter);
    Assert.assertEquals("flush() seems to have modified the index after failed attempt.", INITIAL_LAST_INDEXED_OFFSET - 1, IndexReader.getLastIndexedOffset(context.metadata));

    // 2. INDEX_OFFSET changes to middle of append.
    context.metadata.updateAttributes(Collections.singletonMap(TableAttributes.INDEX_OFFSET, INITIAL_LAST_INDEXED_OFFSET + 1));
    attributeCountBefore = context.segmentMock.getAttributeCount();
    AssertExtensions.assertSuppliedFutureThrows("flush() worked when INDEX_OFFSET changed to middle of append.",
            () -> context.processor.flush(TIMEOUT),
            ex -> ex instanceof DataCorruptionException);
    attributeCountAfter = context.segmentMock.getAttributeCount();
    Assert.assertEquals("flush() seems to have modified the index after failed attempt", attributeCountBefore, attributeCountAfter);
    Assert.assertEquals("flush() seems to have modified the index after failed attempt.", INITIAL_LAST_INDEXED_OFFSET + 1, IndexReader.getLastIndexedOffset(context.metadata));

    // 3. INDEX_OFFSET changes after the first append, but before the second one.
    context.metadata.updateAttributes(Collections.singletonMap(TableAttributes.INDEX_OFFSET, append2.getStreamSegmentOffset()));
    context.connector.refreshLastIndexedOffset();
    attributeCountBefore = context.segmentMock.getAttributeCount();
    context.processor.flush(TIMEOUT).join();
    attributeCountAfter = context.segmentMock.getAttributeCount();
    AssertExtensions.assertGreaterThan("flush() did not modify the index after partial reconciliation.", attributeCountBefore, attributeCountAfter);
    Assert.assertEquals("flush() did not modify the index after partial reconciliation.", append2.getLastStreamSegmentOffset(), IndexReader.getLastIndexedOffset(context.metadata));
Assert.assertFalse("Unexpected result from mustFlush() after partial reconciliation.", context.processor.mustFlush());
// 4. INDEX_OFFSET changes beyond the last append.
val e3 = TableEntry.unversioned(new ByteArraySegment("k3".getBytes()), new ByteArraySegment("v3".getBytes()));
val append3 = generateRawAppend(e3, append2.getLastStreamSegmentOffset(), context);
context.segmentMock.append(append3.getData(), null, TIMEOUT).join();
context.processor.add(new CachedStreamSegmentAppendOperation(append3));
context.metadata.updateAttributes(Collections.singletonMap(TableAttributes.INDEX_OFFSET, append3.getLastStreamSegmentOffset() + 1));
context.connector.refreshLastIndexedOffset();
attributeCountBefore = context.segmentMock.getAttributeCount();
context.processor.flush(TIMEOUT).join();
attributeCountAfter = context.segmentMock.getAttributeCount();
Assert.assertEquals("flush() seems to have modified the index after full reconciliation.", attributeCountBefore, attributeCountAfter);
Assert.assertEquals("flush() did not properly update INDEX_OFFSET after full reconciliation.", append3.getLastStreamSegmentOffset() + 1, IndexReader.getLastIndexedOffset(context.metadata));
Assert.assertFalse("Unexpected result from mustFlush() after full reconciliation.", context.processor.mustFlush());
}
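In steps 3 and 4 the out-of-band change is made visible to the processor by updating the INDEX_OFFSET attribute directly on the metadata, asking the connector to re-read it, and checking the result through IndexReader. That step distilled, assuming the same TestContext fixtures; newIndexOffset is an illustrative target value:

    // Illustrative only: simulate an out-of-band change to INDEX_OFFSET and let the processor observe it.
    long newIndexOffset = append2.getLastStreamSegmentOffset();
    context.metadata.updateAttributes(Collections.singletonMap(TableAttributes.INDEX_OFFSET, newIndexOffset));
    context.connector.refreshLastIndexedOffset();
    Assert.assertEquals(newIndexOffset, IndexReader.getLastIndexedOffset(context.metadata));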
use of io.pravega.segmentstore.server.logs.operations.CachedStreamSegmentAppendOperation in project pravega by pravega.
the class WriterTableProcessorTests method generateAndPopulateEntriesBatch.
private TestBatchData generateAndPopulateEntriesBatch(int batchSize, Map<BufferView, TableEntry> initialState, TestContext context) {
    val result = new TestBatchData(new HashMap<>(initialState));

    // Need a list so we can efficiently pick removal candidates.
    val allKeys = new ArrayList<>(initialState.keySet());
    for (int i = 0; i < batchSize; i++) {
        // We only generate a remove if we have something to remove.
        boolean remove = allKeys.size() > 0 && (context.random.nextDouble() < REMOVE_FRACTION);
        StreamSegmentAppendOperation append;
        if (remove) {
            val key = allKeys.get(context.random.nextInt(allKeys.size()));
            append = generateRawRemove(TableKey.unversioned(key), context.metadata.getLength(), context);
            result.expectedEntries.remove(key);
            allKeys.remove(key);
        } else {
            // Generate a new Table Entry.
            byte[] keyData = new byte[Math.max(1, context.random.nextInt(MAX_KEY_LENGTH))];
            context.random.nextBytes(keyData);
            byte[] valueData = new byte[context.random.nextInt(MAX_VALUE_LENGTH)];
            context.random.nextBytes(valueData);
            // Run the key through the external translator to ensure that we don't clash with internal keys by chance.
            // (this is done for us by ContainerTableExtensionImpl already, so we're only simulating the same behavior).
            val key = new ByteArraySegment(keyData);
            val offset = context.metadata.getLength();
            val entry = TableEntry.versioned(key, new ByteArraySegment(valueData), offset);
            append = generateRawAppend(entry, offset, context);
            result.expectedEntries.put(key, entry);
            allKeys.add(key);
        }

        // Add to segment.
        context.metadata.setLength(context.metadata.getLength() + append.getLength());
        context.segmentMock.append(append.getData(), null, TIMEOUT).join();

        // Add to result.
        result.operations.add(new CachedStreamSegmentAppendOperation(append));
    }
    return result;
}
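A hypothetical driver for this helper, consuming the batch the way the other tests in this class do, inside a test method declared throws Exception; the batch size and the empty initial state are illustrative:

    // Illustrative only: queue every generated operation with the processor, then flush.
    val batch = generateAndPopulateEntriesBatch(50, Collections.emptyMap(), context);
    for (val op : batch.operations) {
        context.processor.add(op);
    }
    context.processor.flush(TIMEOUT).join();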