Use of io.pravega.controller.store.stream.records.CommittingTransactionsRecord in project pravega by pravega.
From the class PersistentStreamBase, method rollingTxnCreateDuplicateEpochs.
@Override
public CompletableFuture<Void> rollingTxnCreateDuplicateEpochs(
        Map<Long, Long> sealedTxnEpochSegments, long time,
        VersionedMetadata<CommittingTransactionsRecord> record, OperationContext context) {
    Preconditions.checkNotNull(context, "Operation context cannot be null");
    Preconditions.checkArgument(record.getObject().isRollingTxnRecord());
    CommittingTransactionsRecord committingTxnRecord = record.getObject();
    return getActiveEpoch(true, context)
            .thenCompose(activeEpochRecord -> getEpochRecord(committingTxnRecord.getEpoch(), context)
            .thenCompose(transactionEpochRecord -> {
                if (activeEpochRecord.getEpoch() > committingTxnRecord.getCurrentEpoch()) {
                    log.debug(context.getRequestId(), "Duplicate Epochs {} already created. Ignore.",
                            committingTxnRecord.getNewActiveEpoch());
                    return CompletableFuture.completedFuture(null);
                }
                long timeStamp = Math.max(activeEpochRecord.getCreationTime() + 1, time);
                ImmutableList.Builder<StreamSegmentRecord> duplicateTxnSegmentsBuilder = ImmutableList.builder();
                transactionEpochRecord.getSegments().stream()
                        .forEach(x -> duplicateTxnSegmentsBuilder.add(newSegmentRecord(
                                computeSegmentId(getSegmentNumber(x.segmentId()), committingTxnRecord.getNewTxnEpoch()),
                                timeStamp, x.getKeyStart(), x.getKeyEnd())));
                ImmutableList.Builder<StreamSegmentRecord> duplicateActiveSegmentsBuilder = ImmutableList.builder();
                activeEpochRecord.getSegments().stream()
                        .forEach(x -> duplicateActiveSegmentsBuilder.add(newSegmentRecord(
                                computeSegmentId(getSegmentNumber(x.segmentId()), committingTxnRecord.getNewActiveEpoch()),
                                timeStamp + 1, x.getKeyStart(), x.getKeyEnd())));
                CompletableFuture<EpochRecord> txnEpochFuture = getSplitMergeCountsTillEpoch(activeEpochRecord, context)
                        .thenCompose(txnSplitMergeCount -> {
                            ImmutableList<StreamSegmentRecord> duplicateTxnEpochSegments = duplicateTxnSegmentsBuilder.build();
                            EpochRecord duplicateTxnEpoch = new EpochRecord(committingTxnRecord.getNewTxnEpoch(),
                                    transactionEpochRecord.getReferenceEpoch(), duplicateTxnEpochSegments, timeStamp,
                                    getNewEpochSplitCount(txnSplitMergeCount.getKey(), activeEpochRecord.getSegments(), duplicateTxnEpochSegments),
                                    getNewEpochMergeCount(txnSplitMergeCount.getValue(), activeEpochRecord.getSegments(), duplicateTxnEpochSegments));
                            return CompletableFuture.completedFuture(duplicateTxnEpoch);
                        });
                CompletableFuture<EpochRecord> activeEpochFuture = txnEpochFuture
                        .thenCompose(previousEpoch -> getSplitMergeCountsTillEpoch(previousEpoch, context)
                        .thenCompose(prevSplitMergeCounts -> {
                            ImmutableList<StreamSegmentRecord> activeEpochSegments = duplicateActiveSegmentsBuilder.build();
                            EpochRecord duplicateActiveEpoch = new EpochRecord(committingTxnRecord.getNewActiveEpoch(),
                                    activeEpochRecord.getReferenceEpoch(), activeEpochSegments, timeStamp + 1,
                                    getNewEpochSplitCount(prevSplitMergeCounts.getKey(), previousEpoch.getSegments(), activeEpochSegments),
                                    getNewEpochMergeCount(prevSplitMergeCounts.getValue(), previousEpoch.getSegments(), activeEpochSegments));
                            return CompletableFuture.completedFuture(duplicateActiveEpoch);
                        }));
                return CompletableFuture.allOf(txnEpochFuture, activeEpochFuture).thenCompose(v -> {
                    EpochRecord duplicateTxnEpoch = txnEpochFuture.join();
                    EpochRecord duplicateActiveEpoch = activeEpochFuture.join();
                    HistoryTimeSeriesRecord timeSeriesRecordTxnEpoch = new HistoryTimeSeriesRecord(
                            duplicateTxnEpoch.getEpoch(), duplicateTxnEpoch.getReferenceEpoch(),
                            ImmutableList.of(), ImmutableList.of(), timeStamp);
                    HistoryTimeSeriesRecord timeSeriesRecordActiveEpoch = new HistoryTimeSeriesRecord(
                            duplicateActiveEpoch.getEpoch(), duplicateActiveEpoch.getReferenceEpoch(),
                            ImmutableList.of(), ImmutableList.of(), timeStamp + 1);
                    return createEpochRecord(duplicateTxnEpoch, context)
                            .thenCompose(x -> updateHistoryTimeSeries(timeSeriesRecordTxnEpoch, context))
                            .thenCompose(x -> createEpochRecord(duplicateActiveEpoch, context))
                            .thenCompose(x -> updateHistoryTimeSeries(timeSeriesRecordActiveEpoch, context))
                            .thenCompose(x -> createSegmentSealedEpochRecords(
                                    activeEpochRecord.getSegments().stream().map(StreamSegmentRecord::segmentId)
                                            .collect(Collectors.toList()),
                                    duplicateTxnEpoch.getEpoch(), context))
                            .thenCompose(x -> createSegmentSealedEpochRecords(
                                    duplicateTxnEpoch.getSegments().stream().map(StreamSegmentRecord::segmentId)
                                            .collect(Collectors.toList()),
                                    duplicateActiveEpoch.getEpoch(), context));
                }).thenCompose(r -> updateSealedSegmentSizes(sealedTxnEpochSegments, context));
            }));
}
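The duplicate epochs reuse each original segment's number but stamp it with the new epoch via computeSegmentId. A minimal, self-contained sketch of that id packing follows; it assumes the epoch occupies the high 32 bits of the segment id and the segment number the low 32 bits (Pravega's NameUtils helpers own the real encoding), so treat it as illustrative rather than authoritative.

// Sketch: a duplicate segment id keeps the original segment number and
// substitutes the new epoch. The 32/32-bit split is an assumption for
// illustration.
public final class SegmentIdSketch {

    static long computeSegmentId(int segmentNumber, int epoch) {
        return ((long) epoch << 32) + segmentNumber;
    }

    static int getSegmentNumber(long segmentId) {
        return (int) segmentId; // low 32 bits
    }

    public static void main(String[] args) {
        long original = computeSegmentId(3, 0);                                 // segment 3, created in epoch 0
        long duplicateTxn = computeSegmentId(getSegmentNumber(original), 5);    // same number, duplicate txn epoch
        long duplicateActive = computeSegmentId(getSegmentNumber(original), 6); // same number, duplicate active epoch
        System.out.printf("%d -> %d, %d%n", original, duplicateTxn, duplicateActive);
    }
}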
Use of io.pravega.controller.store.stream.records.CommittingTransactionsRecord in project pravega by pravega.
From the class RequestHandlersTest, method concurrentTxnCommit.
private void concurrentTxnCommit(String stream, String func, boolean expectFailureOnFirstJob,
                                 Predicate<Throwable> firstExceptionPredicate,
                                 Map<String, Integer> invocationCount, int expectedVersion) throws Exception {
    StreamMetadataStore streamStore1 = getStore();
    StreamMetadataStore streamStore1Spied = spy(getStore());
    StreamConfiguration config = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.byEventRate(1, 2, 1)).build();
    streamStore1.createStream(scope, stream, config, System.currentTimeMillis(), null, executor).join();
    streamStore1.setState(scope, stream, State.ACTIVE, null, executor).join();
    StreamMetadataStore streamStore2 = getStore();
    CommitRequestHandler requestHandler1 = new CommitRequestHandler(streamStore1Spied, streamMetadataTasks,
            streamTransactionMetadataTasks, bucketStore, executor);
    CommitRequestHandler requestHandler2 = new CommitRequestHandler(streamStore2, streamMetadataTasks,
            streamTransactionMetadataTasks, bucketStore, executor);
    // create txn on epoch 0 and set it to committing
    UUID txnId = streamStore1.generateTransactionId(scope, stream, null, executor).join();
    VersionedTransactionData txnEpoch0 = streamStore1.createTransaction(scope, stream, txnId, 1000L, 10000L,
            null, executor).join();
    streamStore1.sealTransaction(scope, stream, txnId, true, Optional.of(txnEpoch0.getVersion()), "",
            Long.MIN_VALUE, null, executor).join();
    // regular commit
    // start commit transactions
    CompletableFuture<Void> wait = new CompletableFuture<>();
    CompletableFuture<Void> signal = new CompletableFuture<>();
    CommitEvent commitOnEpoch1 = new CommitEvent(scope, stream, 0);
    setMockCommitTxnLatch(streamStore1, streamStore1Spied, func, signal, wait);
    CompletableFuture<Void> future1 = CompletableFuture.completedFuture(null)
            .thenComposeAsync(v -> requestHandler1.execute(commitOnEpoch1), executor);
    signal.join();
    // let this run to completion. this should succeed
    requestHandler2.execute(commitOnEpoch1).join();
    wait.complete(null);
    if (expectFailureOnFirstJob) {
        AssertExtensions.assertSuppliedFutureThrows("first commit should fail", () -> future1, firstExceptionPredicate);
    } else {
        future1.join();
    }
    verify(streamStore1Spied, times(invocationCount.get("startCommitTransactions")))
            .startCommitTransactions(anyString(), anyString(), anyInt(), any(), any());
    verify(streamStore1Spied, times(invocationCount.get("completeCommitTransactions")))
            .completeCommitTransactions(anyString(), anyString(), any(), any(), any(), any());
    verify(streamStore1Spied, times(invocationCount.get("updateVersionedState")))
            .updateVersionedState(anyString(), anyString(), any(), any(), any(), any());
    VersionedMetadata<CommittingTransactionsRecord> versioned = streamStore1
            .getVersionedCommittingTransactionsRecord(scope, stream, null, executor).join();
    assertEquals(CommittingTransactionsRecord.EMPTY, versioned.getObject());
    assertEquals(expectedVersion, getVersionNumber(versioned.getVersion()));
    assertEquals(State.ACTIVE, streamStore1.getState(scope, stream, true, null, executor).join());
    streamStore1.close();
    streamStore2.close();
}
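The helper setMockCommitTxnLatch is defined elsewhere in RequestHandlersTest and its body is not shown above. A plausible reconstruction of such a latch-based Mockito stub is sketched below, inferring the startCommitTransactions signature from the verify call above and showing only that one branch; the argument positions, the delegation call, and the static imports of doAnswer/anyString/anyInt/any are assumptions for illustration.

// Hypothetical reconstruction (not the test's actual helper): the spy signals
// the test once the stubbed method is entered, parks until released, and then
// delegates to the real store.
private void setMockCommitTxnLatch(StreamMetadataStore store, StreamMetadataStore spied,
                                   String func, CompletableFuture<Void> signal,
                                   CompletableFuture<Void> waitOn) {
    if ("startCommitTransactions".equals(func)) {
        doAnswer(invocation -> {
            signal.complete(null); // tell the test the call is in flight
            waitOn.join();         // block until the test calls wait.complete(null)
            return store.startCommitTransactions(invocation.getArgument(0), invocation.getArgument(1),
                    invocation.getArgument(2), invocation.getArgument(3), invocation.getArgument(4));
        }).when(spied).startCommitTransactions(anyString(), anyString(), anyInt(), any(), any());
    }
    // the real helper presumably stubs the other store methods
    // ("completeCommitTransactions", "updateVersionedState", ...) the same way, selected by func
}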
Use of io.pravega.controller.store.stream.records.CommittingTransactionsRecord in project pravega by pravega.
From the class ControllerEventProcessorPravegaTablesStreamTest, method testTxnPartialCommitRetry.
@Test(timeout = 10000)
public void testTxnPartialCommitRetry() {
    PravegaTablesStoreHelper storeHelper = spy(new PravegaTablesStoreHelper(
            SegmentHelperMock.getSegmentHelperMockForTables(executor), GrpcAuthHelper.getDisabledAuthHelper(), executor));
    this.streamStore = new PravegaTablesStreamMetadataStore(PRAVEGA_ZK_CURATOR_RESOURCE.client, executor,
            Duration.ofHours(Config.COMPLETED_TRANSACTION_TTL_IN_HOURS), storeHelper);
    SegmentHelper segmentHelperMock = SegmentHelperMock.getSegmentHelperMock();
    EventHelper eventHelperMock = EventHelperMock.getEventHelperMock(executor, "1",
            ((AbstractStreamMetadataStore) this.streamStore).getHostTaskIndex());
    StreamMetadataTasks streamMetadataTasks = new StreamMetadataTasks(streamStore, this.bucketStore,
            TaskStoreFactory.createInMemoryStore(executor), segmentHelperMock, executor, "1",
            GrpcAuthHelper.getDisabledAuthHelper(), eventHelperMock);
    StreamTransactionMetadataTasks streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(
            this.streamStore, segmentHelperMock, executor, "host", GrpcAuthHelper.getDisabledAuthHelper());
    streamTransactionMetadataTasks.initializeStreamWriters(new EventStreamWriterMock<>(), new EventStreamWriterMock<>());
    String scope = "scope";
    String stream = "stream";
    // region createStream
    final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration1 = StreamConfiguration.builder().scalingPolicy(policy1).build();
    streamStore.createScope(scope, null, executor).join();
    long start = System.currentTimeMillis();
    streamStore.createStream(scope, stream, configuration1, start, null, executor).join();
    streamStore.setState(scope, stream, State.ACTIVE, null, executor).join();
    StreamMetadataTasks spyStreamMetadataTasks = spy(streamMetadataTasks);
    List<VersionedTransactionData> txnDataList = createAndCommitTransactions(3);
    int epoch = txnDataList.get(0).getEpoch();
    spyStreamMetadataTasks.setRequestEventWriter(new EventStreamWriterMock<>());
    CommitRequestHandler commitEventProcessor = new CommitRequestHandler(streamStore, spyStreamMetadataTasks,
            streamTransactionMetadataTasks, bucketStore, executor);
    final String committingTxnsRecordKey = "committingTxns";
    long failingClientRequestId = 123L;
    doReturn(failingClientRequestId).when(spyStreamMetadataTasks).getRequestId(any());
    OperationContext context = this.streamStore.createStreamContext(scope, stream, failingClientRequestId);
    streamStore.startCommitTransactions(scope, stream, 100, context, executor).join();
    doReturn(Futures.failedFuture(new RuntimeException())).when(storeHelper).updateEntry(anyString(),
            eq(committingTxnsRecordKey), any(), ArgumentMatchers.<Function<String, byte[]>>any(), any(),
            eq(failingClientRequestId));
    AssertExtensions.assertFutureThrows("Updating CommittingTxnRecord fails",
            commitEventProcessor.processEvent(new CommitEvent(scope, stream, epoch)),
            e -> Exceptions.unwrap(e) instanceof RuntimeException);
    verify(storeHelper, times(1)).removeEntries(anyString(), any(), eq(failingClientRequestId));
    VersionedMetadata<CommittingTransactionsRecord> versionedCommitRecord = this.streamStore
            .getVersionedCommittingTransactionsRecord(scope, stream, context, executor).join();
    CommittingTransactionsRecord commitRecord = versionedCommitRecord.getObject();
    assertFalse(CommittingTransactionsRecord.EMPTY.equals(commitRecord));
    for (VersionedTransactionData txnData : txnDataList) {
        checkTransactionState(scope, stream, txnData.getId(), TxnStatus.COMMITTED);
    }
    long goodClientRequestId = 4567L;
    doReturn(goodClientRequestId).when(spyStreamMetadataTasks).getRequestId(any());
    commitEventProcessor.processEvent(new CommitEvent(scope, stream, epoch)).join();
    versionedCommitRecord = this.streamStore.getVersionedCommittingTransactionsRecord(scope, stream, context, executor).join();
    commitRecord = versionedCommitRecord.getObject();
    assertTrue(CommittingTransactionsRecord.EMPTY.equals(commitRecord));
    for (VersionedTransactionData txnData : txnDataList) {
        checkTransactionState(scope, stream, txnData.getId(), TxnStatus.COMMITTED);
    }
}
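Note that the injected failure above is keyed to failingClientRequestId, so the retry issued with goodClientRequestId bypasses the stub and exercises the recovery path: the transactions are already COMMITTED, and the retry merely resets the committing-transactions record to EMPTY. The same one-request-id failure-injection pattern can be sketched generically as below; the KvStore interface is hypothetical, not Pravega's API, and Mockito's doReturn/eq/any/anyString are assumed to be statically imported.

// Illustrative pattern with a hypothetical KvStore: the spied dependency fails
// only for the failing request id; other request ids fall through to the real
// behavior, which is what lets a retry with a fresh id succeed.
interface KvStore {
    CompletableFuture<Long> updateEntry(String table, String key, byte[] value, long requestId);
}

static void injectFailureForRequest(KvStore spiedStore, String key, long failingRequestId) {
    doReturn(CompletableFuture.failedFuture(new RuntimeException("injected update failure")))
            .when(spiedStore)
            .updateEntry(anyString(), eq(key), any(), eq(failingRequestId));
}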
Use of io.pravega.controller.store.stream.records.CommittingTransactionsRecord in project pravega by pravega.
From the class StreamMetadataStoreTest, method txnOrderTest.
@Test(timeout = 30000)
public void txnOrderTest() throws Exception {
    final String scope = "txnOrder";
    final String stream = "txnOrder";
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
    long start = System.currentTimeMillis();
    store.createScope(scope, null, executor).get();
    store.createStream(scope, stream, configuration, start, null, executor).get();
    store.setState(scope, stream, State.ACTIVE, null, executor).get();
    long scaleTs = System.currentTimeMillis();
    SimpleEntry<Double, Double> segment2 = new SimpleEntry<>(0.5, 0.75);
    SimpleEntry<Double, Double> segment3 = new SimpleEntry<>(0.75, 1.0);
    List<Long> scale1SealedSegments = Collections.singletonList(1L);
    // create 3 transactions on epoch 0 --> tx00, tx01, tx02: mark the first as committing, the second as
    // aborting, and keep the third open. Add ordered entries for all three and verify that they are present
    // in the ordered set.
    UUID tx00 = store.generateTransactionId(scope, stream, null, executor).join();
    store.createTransaction(scope, stream, tx00, 100, 100, null, executor).get();
    UUID tx01 = store.generateTransactionId(scope, stream, null, executor).join();
    store.createTransaction(scope, stream, tx01, 100, 100, null, executor).get();
    UUID tx02 = store.generateTransactionId(scope, stream, null, executor).join();
    store.createTransaction(scope, stream, tx02, 100, 100, null, executor).get();
    // committing
    store.sealTransaction(scope, stream, tx00, true, Optional.empty(), "", Long.MIN_VALUE, null, executor).get();
    // aborting
    store.sealTransaction(scope, stream, tx01, false, Optional.empty(), "", Long.MIN_VALUE, null, executor).get();
    PersistentStreamBase streamObj = (PersistentStreamBase) ((AbstractStreamMetadataStore) store).getStream(scope, stream, null);
    // duplicate entry for tx00
    OperationContext context = new StreamOperationContext(((AbstractStreamMetadataStore) store).getScope(scope, null), streamObj, 0L);
    streamObj.addTxnToCommitOrder(tx00, context).join();
    // entry for aborting transaction tx01
    streamObj.addTxnToCommitOrder(tx01, context).join();
    // entry for open transaction tx02
    streamObj.addTxnToCommitOrder(tx02, context).join();
    Map<Long, UUID> positions = streamObj.getAllOrderedCommittingTxns(context).join();
    assertEquals(4, positions.size());
    assertEquals(positions.get(0L), tx00);
    assertEquals(positions.get(1L), tx00);
    assertEquals(positions.get(2L), tx01);
    assertEquals(positions.get(3L), tx02);
    // verify that when we retrieve transactions from the lowest epoch we get tx00
    List<VersionedTransactionData> orderedRecords = streamObj.getOrderedCommittingTxnInLowestEpoch(100, context).join();
    assertEquals(1, orderedRecords.size());
    assertEquals(tx00, orderedRecords.get(0).getId());
    // verify that duplicates and stale entries are purged; entries for the open and committing transactions are retained
    positions = streamObj.getAllOrderedCommittingTxns(context).join();
    assertEquals(2, positions.size());
    assertEquals(positions.get(0L), tx00);
    assertEquals(positions.get(3L), tx02);
    // scale
    scale(scope, stream, scaleTs, Arrays.asList(segment2, segment3), scale1SealedSegments);
    // create 3 transactions on epoch 1 --> tx10, tx11, tx12, and set all three to committing.
    // Ordered entries are added when they are sealed; verify that they are present in the ordered set.
    UUID tx10 = store.generateTransactionId(scope, stream, null, executor).join();
    store.createTransaction(scope, stream, tx10, 100, 100, null, executor).get();
    UUID tx11 = store.generateTransactionId(scope, stream, null, executor).join();
    store.createTransaction(scope, stream, tx11, 100, 100, null, executor).get();
    UUID tx12 = store.generateTransactionId(scope, stream, null, executor).join();
    store.createTransaction(scope, stream, tx12, 100, 100, null, executor).get();
    // set all three transactions to committing
    store.sealTransaction(scope, stream, tx10, true, Optional.empty(), "", Long.MIN_VALUE, null, executor).get();
    store.sealTransaction(scope, stream, tx11, true, Optional.empty(), "", Long.MIN_VALUE, null, executor).get();
    store.sealTransaction(scope, stream, tx12, true, Optional.empty(), "", Long.MIN_VALUE, null, executor).get();
    // verify that we still get only tx00
    orderedRecords = streamObj.getOrderedCommittingTxnInLowestEpoch(100, context).join();
    assertEquals(1, orderedRecords.size());
    assertEquals(tx00, orderedRecords.get(0).getId());
    assertEquals(0L, orderedRecords.get(0).getCommitOrder().longValue());
    // verify that positions has 3 new entries added, though
    positions = streamObj.getAllOrderedCommittingTxns(context).join();
    assertEquals(5, positions.size());
    assertEquals(positions.get(0L), tx00);
    assertEquals(positions.get(3L), tx02);
    assertEquals(positions.get(4L), tx10);
    assertEquals(positions.get(5L), tx11);
    assertEquals(positions.get(6L), tx12);
    VersionedMetadata<CommittingTransactionsRecord> record = store.startCommitTransactions(scope, stream, 100, null, executor).join().getKey();
    // verify that after including transaction tx00 in the record, its reference is no longer kept in the ordered set
    positions = streamObj.getAllOrderedCommittingTxns(context).join();
    assertEquals(4, positions.size());
    assertFalse(positions.containsKey(0L));
    assertEquals(positions.get(3L), tx02);
    assertEquals(positions.get(4L), tx10);
    assertEquals(positions.get(5L), tx11);
    assertEquals(positions.get(6L), tx12);
    store.setState(scope, stream, State.COMMITTING_TXN, null, executor).join();
    // verify that we need to perform a rolling transaction
    EpochRecord activeEpoch = store.getActiveEpoch(scope, stream, null, true, executor).join();
    assertEquals(0, record.getObject().getEpoch());
    assertEquals(1, activeEpoch.getEpoch());
    // also, the transactions to commit match the transactions in the lowest epoch
    assertEquals(record.getObject().getTransactionsToCommit(), orderedRecords.stream().map(x -> x.getId()).collect(Collectors.toList()));
    record = store.startRollingTxn(scope, stream, activeEpoch.getEpoch(), record, null, executor).join();
    store.rollingTxnCreateDuplicateEpochs(scope, stream, Collections.emptyMap(), System.currentTimeMillis(), record, null, executor).join();
    store.completeRollingTxn(scope, stream, Collections.emptyMap(), record, null, executor).join();
    store.completeCommitTransactions(scope, stream, record, null, executor, Collections.emptyMap()).join();
    store.setState(scope, stream, State.ACTIVE, null, executor).join();
    // after committing, we should have committed tx00 while having purged references for tx01 and tx02.
    // getting the ordered list should return the txns on epoch 1 in the order in which we issued commits
    orderedRecords = streamObj.getOrderedCommittingTxnInLowestEpoch(100, context).join();
    assertEquals(3, orderedRecords.size());
    assertEquals(tx10, orderedRecords.get(0).getId());
    assertEquals(tx11, orderedRecords.get(1).getId());
    assertEquals(tx12, orderedRecords.get(2).getId());
    // verify that the transactions are still present in positions
    positions = streamObj.getAllOrderedCommittingTxns(context).join();
    assertEquals(4, positions.size());
    assertEquals(positions.get(3L), tx02);
    assertEquals(positions.get(4L), tx10);
    assertEquals(positions.get(5L), tx11);
    assertEquals(positions.get(6L), tx12);
    // we will issue the next round of commits, which will commit the txns on epoch 1
    activeEpoch = store.getActiveEpoch(scope, stream, null, true, executor).join();
    record = store.startCommitTransactions(scope, stream, 100, null, executor).join().getKey();
    List<UUID> txnIdList = orderedRecords.stream().map(x -> x.getId()).collect(Collectors.toList());
    // verify that the order in the record is the same
    assertEquals(record.getObject().getTransactionsToCommit(), txnIdList);
    // verify that transactions included for commit are removed from positions
    positions = streamObj.getAllOrderedCommittingTxns(context).join();
    assertEquals(1, positions.size());
    assertEquals(positions.get(3L), tx02);
    assertEquals(record.getObject().getTransactionsToCommit(), txnIdList);
    store.setState(scope, stream, State.COMMITTING_TXN, null, executor).join();
    // commit the transactions on epoch 1
    store.completeCommitTransactions(scope, stream, record, null, executor, Collections.emptyMap()).join();
    store.setState(scope, stream, State.ACTIVE, null, executor).join();
    // references for the committed transactions should be removed from the orderer
    orderedRecords = streamObj.getOrderedCommittingTxnInLowestEpoch(100, context).join();
    assertEquals(0, orderedRecords.size());
    // verify that only the reference to the open transaction is retained in positions
    positions = streamObj.getAllOrderedCommittingTxns(context).join();
    assertEquals(1, positions.size());
    assertEquals(positions.get(3L), tx02);
}
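The assertions above outline the contract of the commit orderer: every addTxnToCommitOrder call appends at the next position, and stale entries (duplicates, aborted transactions, transactions already picked up for commit) are purged lazily when the lowest epoch is read. A self-contained in-memory sketch of that observable behavior follows; it mirrors what the test asserts, under the assumption that the real store persists this map in stream metadata rather than in memory.

import java.util.Collection;
import java.util.NavigableMap;
import java.util.TreeMap;
import java.util.UUID;
import java.util.concurrent.ConcurrentSkipListMap;
import java.util.concurrent.atomic.AtomicLong;

// Sketch of the commit orderer's observable contract: appends get monotonically
// increasing positions; reads for the lowest epoch purge positions that turned
// out to be duplicates, aborted, or already picked up for commit.
final class CommitOrderSketch {
    private final AtomicLong nextPosition = new AtomicLong();
    private final ConcurrentSkipListMap<Long, UUID> positions = new ConcurrentSkipListMap<>();

    long addTxnToCommitOrder(UUID txnId) {
        long pos = nextPosition.getAndIncrement();
        positions.put(pos, txnId);
        return pos;
    }

    void purge(Collection<Long> stalePositions) {
        stalePositions.forEach(positions::remove);
    }

    NavigableMap<Long, UUID> getAllOrderedCommittingTxns() {
        return new TreeMap<>(positions); // snapshot in position order
    }
}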
Use of io.pravega.controller.store.stream.records.CommittingTransactionsRecord in project pravega by pravega.
From the class StreamMetadataStoreTest, method testMarkOnTransactionCommit.
@Test(timeout = 30000)
public void testMarkOnTransactionCommit() {
    // create txn
    // seal txn with committing
    final String scope = "MarkOnTransactionCommit";
    final String stream = "MarkOnTransactionCommit";
    final ScalingPolicy policy = ScalingPolicy.fixed(1);
    final StreamConfiguration configuration = StreamConfiguration.builder().scalingPolicy(policy).build();
    long start = System.currentTimeMillis();
    store.createScope(scope, null, executor).join();
    store.createStream(scope, stream, configuration, start, null, executor).join();
    store.setState(scope, stream, State.ACTIVE, null, executor).join();
    UUID txnId = store.generateTransactionId(scope, stream, null, executor).join();
    VersionedTransactionData tx01 = store.createTransaction(scope, stream, txnId, 100, 100, null, executor).join();
    String writer1 = "writer1";
    long time = 1L;
    store.sealTransaction(scope, stream, txnId, true, Optional.of(tx01.getVersion()), writer1, time, null, executor).join();
    VersionedMetadata<CommittingTransactionsRecord> record = store.startCommitTransactions(scope, stream, 100, null, executor).join().getKey();
    store.completeCommitTransactions(scope, stream, record, null, executor,
            Collections.singletonMap(writer1, new TxnWriterMark(time, Collections.singletonMap(0L, 1L), txnId))).join();
    // verify that the writer mark is created in the store
    WriterMark mark = store.getWriterMark(scope, stream, writer1, null, executor).join();
    assertEquals(mark.getTimestamp(), time);
    assertEquals(mark.getPosition().size(), 1);
    assertTrue(mark.getPosition().containsKey(0L));
    assertEquals(mark.getPosition().get(0L).longValue(), 1L);
}
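Here completeCommitTransactions receives the writer marks noted when the transactions were sealed and records the latest mark per writer, which getWriterMark then surfaces. A minimal in-memory sketch of that bookkeeping follows; the class and method names are hypothetical stand-ins for the store's persistent implementation.

import java.util.Map;
import java.util.concurrent.ConcurrentHashMap;

// Sketch: on commit, record each writer's timestamp and per-segment offsets,
// never letting a writer's mark move backwards in time.
final class WriterMarkRegistry {

    static final class Mark {
        final long timestamp;
        final Map<Long, Long> position; // segment id -> offset

        Mark(long timestamp, Map<Long, Long> position) {
            this.timestamp = timestamp;
            this.position = Map.copyOf(position);
        }
    }

    private final Map<String, Mark> marks = new ConcurrentHashMap<>();

    void noteCommit(String writer, long timestamp, Map<Long, Long> position) {
        marks.merge(writer, new Mark(timestamp, position),
                (old, fresh) -> fresh.timestamp >= old.timestamp ? fresh : old);
    }

    Mark getWriterMark(String writer) {
        return marks.get(writer);
    }
}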