Use of io.pravega.controller.store.stream.VersionedTransactionData in project pravega by pravega.
Example from the class ControllerEventProcessorTest, method testCommitEventProcessorFailedWrite.
@Test(timeout = 10000)
public void testCommitEventProcessorFailedWrite() {
    UUID txnId = UUID.randomUUID();
    VersionedTransactionData txnData = streamStore.createTransaction(SCOPE, STREAM, txnId, 10000, 10000, 10000, null, executor).join();
    CommitEventProcessor commitEventProcessor = spy(new CommitEventProcessor(streamStore, streamMetadataTasks, hostStore, executor, segmentHelperMock, null));
    EventProcessor.Writer<CommitEvent> successWriter = event -> CompletableFuture.completedFuture(null);
    EventProcessor.Writer<CommitEvent> failedWriter = event -> {
        CompletableFuture<Void> future = new CompletableFuture<>();
        future.completeExceptionally(new RuntimeException("Error"));
        return future;
    };
    // Simulate a failed write on the first attempt, followed by a successful one.
    when(commitEventProcessor.getSelfWriter()).thenReturn(failedWriter).thenReturn(successWriter);
    // Invoke process with epoch > txnData.getEpoch(); the event is posted back via the self writer,
    // and the failed first write forces a retry, hence getSelfWriter is invoked twice.
    commitEventProcessor.process(new CommitEvent(SCOPE, STREAM, txnData.getEpoch() + 1, txnData.getId()), null);
    verify(commitEventProcessor, times(2)).getSelfWriter();
}
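The stubbed writers are the interesting part of this test: EventProcessor.Writer<CommitEvent> is used here as a functional interface whose write call returns a CompletableFuture<Void>, so a write failure is injected simply by completing that future exceptionally. As a minimal sketch (assuming the same imports and fixtures as the test above; the helper name and the AtomicBoolean flag are illustrative, not Pravega API), the fail-once-then-succeed behaviour could also be packed into a single writer:

// Illustrative helper, not part of Pravega: a writer that fails the first write and succeeds afterwards.
static EventProcessor.Writer<CommitEvent> failOnceWriter() {
    AtomicBoolean failed = new AtomicBoolean(false);
    return event -> {
        if (failed.compareAndSet(false, true)) {
            // first call: simulate a failed write by completing the future exceptionally
            CompletableFuture<Void> failure = new CompletableFuture<>();
            failure.completeExceptionally(new RuntimeException("Error"));
            return failure;
        }
        // subsequent calls: report a successful write
        return CompletableFuture.completedFuture(null);
    };
}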
Use of io.pravega.controller.store.stream.VersionedTransactionData in project pravega by pravega.
Example from the class StreamMetadataTasksTest, method sealStreamWithTxnTest.
@Test(timeout = 30000)
public void sealStreamWithTxnTest() throws Exception {
    WriterMock requestEventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(requestEventWriter);
    String streamWithTxn = "streamWithTxn";
    // region seal a stream with transactions
    long start = System.currentTimeMillis();
    final ScalingPolicy policy = ScalingPolicy.fixed(2);
    final StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(policy).build();
    streamStorePartialMock.createStream(SCOPE, streamWithTxn, config, start, null, executor).get();
    streamStorePartialMock.setState(SCOPE, streamWithTxn, State.ACTIVE, null, executor).get();
    // create three txns
    VersionedTransactionData openTxn = streamTransactionMetadataTasks.createTxn(SCOPE, streamWithTxn, 10000L, 0L, 1024 * 1024L).get().getKey();
    VersionedTransactionData committingTxn = streamTransactionMetadataTasks.createTxn(SCOPE, streamWithTxn, 10000L, 0L, 1024 * 1024L).get().getKey();
    VersionedTransactionData abortingTxn = streamTransactionMetadataTasks.createTxn(SCOPE, streamWithTxn, 10000L, 0L, 1024 * 1024L).get().getKey();
    // set one transaction to committing
    streamStorePartialMock.sealTransaction(SCOPE, streamWithTxn, committingTxn.getId(), true, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();
    // set another transaction to aborting
    streamStorePartialMock.sealTransaction(SCOPE, streamWithTxn, abortingTxn.getId(), false, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();
    // Mock the getActiveTxns call so that the committing txn is reported as an OPEN txn.
    Map<UUID, ActiveTxnRecord> activeTxns = streamStorePartialMock.getActiveTxns(SCOPE, streamWithTxn, null, executor).join();
    Map<UUID, ActiveTxnRecord> retVal = activeTxns.entrySet().stream().map(tx -> {
        if (!tx.getValue().getTxnStatus().equals(TxnStatus.OPEN) && !tx.getValue().getTxnStatus().equals(TxnStatus.ABORTING)) {
            ActiveTxnRecord txRecord = tx.getValue();
            return new AbstractMap.SimpleEntry<>(tx.getKey(), new ActiveTxnRecord(txRecord.getTxCreationTimestamp(), txRecord.getLeaseExpiryTime(), txRecord.getMaxExecutionExpiryTime(), TxnStatus.OPEN));
        } else {
            return tx;
        }
    }).collect(Collectors.toMap(Map.Entry::getKey, Map.Entry::getValue));
    doReturn(CompletableFuture.completedFuture(retVal)).when(streamStorePartialMock).getActiveTxns(eq(SCOPE), eq(streamWithTxn), any(), any());
    List<AbortEvent> abortListBefore = abortWriter.getEventList();
    streamMetadataTasks.sealStream(SCOPE, streamWithTxn, 0L);
    AssertExtensions.assertFutureThrows("seal stream did not fail processing with correct exception", processEvent(requestEventWriter), e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    requestEventWriter.eventQueue.take();
    reset(streamStorePartialMock);
    // verify that the open txn's status is now set to aborting
    VersionedTransactionData txnData = streamStorePartialMock.getTransactionData(SCOPE, streamWithTxn, openTxn.getId(), null, executor).join();
    assertEquals(txnData.getStatus(), TxnStatus.ABORTING);
    assertEquals(requestEventWriter.getEventQueue().size(), 1);
    // verify that abort events are posted for the aborted txns
    List<AbortEvent> abortListAfter = abortWriter.getEventList();
    assertEquals(abortListAfter.size(), abortListBefore.size() + 2);
    assertTrue(abortListAfter.stream().anyMatch(x -> x.getTxid().equals(openTxn.getId())));
    assertTrue(abortListAfter.stream().anyMatch(x -> x.getTxid().equals(abortingTxn.getId())));
    txnData = streamStorePartialMock.getTransactionData(SCOPE, streamWithTxn, committingTxn.getId(), null, executor).join();
    assertEquals(txnData.getStatus(), TxnStatus.COMMITTING);
    // Mock the getActiveTxns call so that it returns a single non-existent transaction id;
    // when its abort is attempted, a DataNotFoundException is thrown.
    retVal = new HashMap<>();
    retVal.put(UUID.randomUUID(), new ActiveTxnRecord(1L, 1L, 1L, TxnStatus.OPEN));
    doReturn(CompletableFuture.completedFuture(retVal)).when(streamStorePartialMock).getActiveTxns(eq(SCOPE), eq(streamWithTxn), any(), any());
    AssertExtensions.assertFutureThrows("seal stream did not fail processing with correct exception", processEvent(requestEventWriter), e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    reset(streamStorePartialMock);
    // Now complete all existing transactions and verify that the seal completes.
    streamStorePartialMock.abortTransaction(SCOPE, streamWithTxn, openTxn.getId(), null, executor).join();
    streamStorePartialMock.abortTransaction(SCOPE, streamWithTxn, abortingTxn.getId(), null, executor).join();
    ((AbstractStreamMetadataStore) streamStorePartialMock).commitTransaction(SCOPE, streamWithTxn, committingTxn.getId(), null, executor).join();
    activeTxns = streamStorePartialMock.getActiveTxns(SCOPE, streamWithTxn, null, executor).join();
    assertTrue(activeTxns.isEmpty());
    assertTrue(Futures.await(processEvent(requestEventWriter)));
    // endregion
}
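Most of the VersionedTransactionData usage in this test is a read-back-and-assert pattern: after seal processing runs, the transaction record is fetched from the store and its getStatus() is checked. A condensed sketch of that check, reusing only the store call shown above (the helper name is illustrative, and the StreamMetadataStore/ScheduledExecutorService parameter types are assumptions based on how the store spy is declared in the txnPingTest example further below):

// Illustrative helper: read a transaction's versioned record and assert on its status.
static void assertTxnStatus(StreamMetadataStore store, String scope, String stream,
                            UUID txnId, TxnStatus expected, ScheduledExecutorService executor) {
    VersionedTransactionData txnData = store.getTransactionData(scope, stream, txnId, null, executor).join();
    assertEquals(expected, txnData.getStatus());
}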
Use of io.pravega.controller.store.stream.VersionedTransactionData in project pravega by pravega.
Example from the class StreamTransactionMetadataTasksTest, method idempotentOperationsTests.
@Test(timeout = 10000)
public void idempotentOperationsTests() throws CheckpointStoreException, InterruptedException {
    // Create mock writer objects.
    EventStreamWriterMock<CommitEvent> commitWriter = new EventStreamWriterMock<>();
    EventStreamWriterMock<AbortEvent> abortWriter = new EventStreamWriterMock<>();
    EventStreamReader<CommitEvent> commitReader = commitWriter.getReader();
    EventStreamReader<AbortEvent> abortReader = abortWriter.getReader();
    // Create transaction tasks.
    txnTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelperMock, executor, "host", GrpcAuthHelper.getDisabledAuthHelper());
    txnTasks.initializeStreamWriters(commitWriter, abortWriter);
    consumer = new ControllerService(kvtStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks, txnTasks, segmentHelperMock, executor, null, requestTracker);
    final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration1 = StreamConfiguration.builder().scalingPolicy(policy1).build();
    // Create scope and stream.
    Assert.assertEquals(Controller.CreateScopeStatus.Status.SUCCESS, consumer.createScope(SCOPE, 0L).join().getStatus());
    Assert.assertEquals(Controller.CreateStreamStatus.Status.SUCCESS, streamMetadataTasks.createStream(SCOPE, STREAM, configuration1, System.currentTimeMillis(), 0L).join());
    // Create 2 transactions.
    final long lease = 5000;
    VersionedTransactionData txData1 = txnTasks.createTxn(SCOPE, STREAM, lease, 0L, 1024 * 1024L).join().getKey();
    VersionedTransactionData txData2 = txnTasks.createTxn(SCOPE, STREAM, lease, 0L, 1024 * 1024L).join().getKey();
    UUID tx1 = txData1.getId();
    UUID tx2 = txData2.getId();
    Version tx2Version = txData2.getVersion();
    // Commit the first one.
    Assert.assertEquals(TxnStatus.COMMITTING, txnTasks.commitTxn(SCOPE, STREAM, tx1, 0L).join());
    // Ensure that the transaction state is COMMITTING.
    assertEquals(TxnStatus.COMMITTING, streamStore.transactionStatus(SCOPE, STREAM, tx1, null, executor).join());
    // Abort the second one.
    Assert.assertEquals(TxnStatus.ABORTING, txnTasks.abortTxn(SCOPE, STREAM, tx2, tx2Version, 0L).join());
    // Ensure that the transaction state is ABORTING.
    assertEquals(TxnStatus.ABORTING, streamStore.transactionStatus(SCOPE, STREAM, tx2, null, executor).join());
    // Ensure that commit (resp. abort) transaction tasks are idempotent
    // when the transaction is in COMMITTING state (resp. ABORTING state).
    assertEquals(TxnStatus.COMMITTING, txnTasks.commitTxn(SCOPE, STREAM, tx1, 0L).join());
    assertEquals(TxnStatus.ABORTING, txnTasks.abortTxn(SCOPE, STREAM, tx2, null, 0L).join());
    // Create commit and abort event processors.
    BlockingQueue<CommitEvent> processedCommitEvents = new LinkedBlockingQueue<>();
    BlockingQueue<AbortEvent> processedAbortEvents = new LinkedBlockingQueue<>();
    createEventProcessor("commitRG", "commitStream", commitReader, commitWriter, () -> new ConcurrentEventProcessor<>(new CommitRequestHandler(streamStore, streamMetadataTasks, txnTasks, bucketStore, executor, processedCommitEvents), executor));
    createEventProcessor("abortRG", "abortStream", abortReader, abortWriter, () -> new ConcurrentEventProcessor<>(new AbortRequestHandler(streamStore, streamMetadataTasks, executor, processedAbortEvents), executor));
    // Wait until the commit event is processed and ensure that the txn state is COMMITTED.
    CommitEvent commitEvent = processedCommitEvents.take();
    assertEquals(0, commitEvent.getEpoch());
    assertEquals(TxnStatus.COMMITTED, streamStore.transactionStatus(SCOPE, STREAM, tx1, null, executor).join());
    // Wait until the abort event is processed and ensure that the txn state is ABORTED.
    AbortEvent abortEvent = processedAbortEvents.take();
    assertEquals(tx2, abortEvent.getTxid());
    assertEquals(TxnStatus.ABORTED, streamStore.transactionStatus(SCOPE, STREAM, tx2, null, executor).join());
    // Ensure that commit (resp. abort) transaction tasks are idempotent
    // even after the transaction is committed (resp. aborted).
    assertEquals(TxnStatus.COMMITTED, txnTasks.commitTxn(SCOPE, STREAM, tx1, 0L).join());
    assertEquals(TxnStatus.ABORTED, txnTasks.abortTxn(SCOPE, STREAM, tx2, null, 0L).join());
}
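The idempotency being verified here reduces to a simple contract: repeating commitTxn on a transaction that is already COMMITTING or COMMITTED (or abortTxn on one that is ABORTING or ABORTED) returns the same status instead of failing. A minimal sketch of that contract for the commit side, reusing the task method exactly as it is called above (the helper name is illustrative, not Pravega API):

// Illustrative check: committing the same transaction twice yields the same status both times.
static void checkCommitIdempotent(StreamTransactionMetadataTasks tasks, String scope, String stream, UUID txnId) {
    TxnStatus first = tasks.commitTxn(scope, stream, txnId, 0L).join();
    TxnStatus second = tasks.commitTxn(scope, stream, txnId, 0L).join();
    // a repeated commit on an already committing/committed txn is a no-op, not an error
    assertEquals(first, second);
}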
Use of io.pravega.controller.store.stream.VersionedTransactionData in project pravega by pravega.
Example from the class StreamTransactionMetadataTasksTest, method txnPingTest.
@Test(timeout = 10000)
public void txnPingTest() throws Exception {
    // Create mock writer objects.
    EventStreamWriterMock<CommitEvent> commitWriter = new EventStreamWriterMock<>();
    EventStreamWriterMock<AbortEvent> abortWriter = new EventStreamWriterMock<>();
    StreamMetadataStore streamStoreMock = spy(StreamStoreFactory.createZKStore(zkClient, executor));
    // Create transaction tasks.
    txnTasks = new StreamTransactionMetadataTasks(streamStoreMock, SegmentHelperMock.getSegmentHelperMock(), executor, "host", new GrpcAuthHelper(this.authEnabled, "secret", 300));
    txnTasks.initializeStreamWriters(commitWriter, abortWriter);
    final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration1 = StreamConfiguration.builder().scalingPolicy(policy1).build();
    // Create scope and stream.
    streamStoreMock.createScope(SCOPE, null, executor).join();
    streamStoreMock.createStream(SCOPE, STREAM, configuration1, System.currentTimeMillis(), null, executor).join();
    streamStoreMock.setState(SCOPE, STREAM, State.ACTIVE, null, executor).join();
    // Verify pinging a committing transaction.
    Pair<VersionedTransactionData, List<StreamSegmentRecord>> txn = txnTasks.createTxn(SCOPE, STREAM, 10000L, 0L, 0L).join();
    UUID txnId = txn.getKey().getId();
    txnTasks.commitTxn(SCOPE, STREAM, txnId, 0L).join();
    assertEquals(PingTxnStatus.Status.COMMITTED, txnTasks.pingTxn(SCOPE, STREAM, txnId, 10000L, 0L).join().getStatus());
    // Complete the commit of the transaction.
    streamStoreMock.startCommitTransactions(SCOPE, STREAM, 100, null, executor).join();
    val record = streamStoreMock.getVersionedCommittingTransactionsRecord(SCOPE, STREAM, null, executor).join();
    streamStoreMock.completeCommitTransactions(SCOPE, STREAM, record, null, executor, Collections.emptyMap()).join();
    // Verify that the transaction is removed from the active txn records.
    AssertExtensions.assertFutureThrows("Fetching Active Txn record should throw DNF", streamStoreMock.getTransactionData(SCOPE, STREAM, txnId, null, executor), e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException);
    assertEquals(PingTxnStatus.Status.COMMITTED, txnTasks.pingTxn(SCOPE, STREAM, txnId, 10000L, 0L).join().getStatus());
    // Verify pinging an aborting transaction.
    txn = txnTasks.createTxn(SCOPE, STREAM, 10000L, 0L, 1024 * 1024L).join();
    txnId = txn.getKey().getId();
    txnTasks.abortTxn(SCOPE, STREAM, txnId, null, 0L).join();
    assertEquals(PingTxnStatus.Status.ABORTED, txnTasks.pingTxn(SCOPE, STREAM, txnId, 10000L, 0L).join().getStatus());
    // Now complete the abort so that the transaction is removed from active txns and added to completed txns.
    streamStoreMock.abortTransaction(SCOPE, STREAM, txnId, null, executor).join();
    AssertExtensions.assertFutureThrows("Fetching Active Txn record should throw DNF", streamStoreMock.getTransactionData(SCOPE, STREAM, txnId, null, executor), e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException);
    assertEquals(PingTxnStatus.Status.ABORTED, txnTasks.pingTxn(SCOPE, STREAM, txnId, 10000L, 0L).join().getStatus());
    // Try with a non-existent transaction id.
    assertEquals(PingTxnStatus.Status.UNKNOWN, txnTasks.pingTxn(SCOPE, STREAM, UUID.randomUUID(), 10000L, 0L).join().getStatus());
    // Verify max execution time.
    txnTasks.setMaxExecutionTime(1L);
    txn = txnTasks.createTxn(SCOPE, STREAM, 10000L, 0L, 1024 * 1024L).join();
    UUID tid = txn.getKey().getId();
    AssertExtensions.assertEventuallyEquals(PingTxnStatus.Status.MAX_EXECUTION_TIME_EXCEEDED, () -> txnTasks.pingTxn(SCOPE, STREAM, tid, 10000L, 0L).join().getStatus(), 10000L);
    txnTasks.setMaxExecutionTime(Duration.ofDays(Config.MAX_TXN_EXECUTION_TIMEBOUND_DAYS).toMillis());
}
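As a usage note, the ping statuses asserted above follow the transaction's lifecycle: a committed or aborted transaction reports COMMITTED or ABORTED (even once its record leaves the active-txn store), an unknown id reports UNKNOWN, and pings past the configured max execution time report MAX_EXECUTION_TIME_EXCEEDED. A condensed wrapper around the call used throughout the test (the helper name and the fixed 10000 ms lease are illustrative):

// Illustrative wrapper: ping a transaction with a 10-second lease and return the resulting status.
static PingTxnStatus.Status ping(StreamTransactionMetadataTasks tasks, String scope, String stream, UUID txnId) {
    return tasks.pingTxn(scope, stream, txnId, 10000L, 0L).join().getStatus();
}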
Use of io.pravega.controller.store.stream.VersionedTransactionData in project pravega by pravega.
Example from the class StreamTransactionMetadataTasksTest, method commitAbortTests.
@Test(timeout = 5000)
@SuppressWarnings("unchecked")
public void commitAbortTests() {
    // Create mock writer objects.
    final List<CompletableFuture<Void>> commitWriterResponses = getWriteResultSequence(5);
    final List<CompletableFuture<Void>> abortWriterResponses = getWriteResultSequence(5);
    EventStreamWriter<CommitEvent> commitWriter = Mockito.mock(EventStreamWriter.class);
    Mockito.when(commitWriter.writeEvent(anyString(), any())).thenAnswer(new SequenceAnswer<>(commitWriterResponses));
    EventStreamWriter<AbortEvent> abortWriter = Mockito.mock(EventStreamWriter.class);
    Mockito.when(abortWriter.writeEvent(anyString(), any())).thenAnswer(new SequenceAnswer<>(abortWriterResponses));
    // Create transaction tasks.
    txnTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelperMock, executor, "host", GrpcAuthHelper.getDisabledAuthHelper());
    txnTasks.initializeStreamWriters(commitWriter, abortWriter);
    // Create ControllerService.
    consumer = new ControllerService(kvtStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks, txnTasks, segmentHelperMock, executor, null, requestTracker);
    final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration1 = StreamConfiguration.builder().scalingPolicy(policy1).build();
    // Create stream and scope
    Assert.assertEquals(Controller.CreateScopeStatus.Status.SUCCESS, consumer.createScope(SCOPE, 0L).join().getStatus());
    Assert.assertEquals(Controller.CreateStreamStatus.Status.SUCCESS, streamMetadataTasks.createStream(SCOPE, STREAM, configuration1, 0, 0L).join());
    // Create 2 transactions
    final long lease = 5000;
    VersionedTransactionData txData1 = txnTasks.createTxn(SCOPE, STREAM, lease, 0L, 1024 * 1024L).join().getKey();
    VersionedTransactionData txData2 = txnTasks.createTxn(SCOPE, STREAM, lease, 0L, 1024 * 1024L).join().getKey();
    // Commit the first one
    TxnStatus status = txnTasks.commitTxn(SCOPE, STREAM, txData1.getId(), 0L).join();
    Assert.assertEquals(TxnStatus.COMMITTING, status);
    // Abort the second one
    status = txnTasks.abortTxn(SCOPE, STREAM, txData2.getId(), txData2.getVersion(), 0L).join();
    Assert.assertEquals(TxnStatus.ABORTING, status);
}
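In this test, VersionedTransactionData enters through createTxn: the returned record carries both the transaction id and the version that abortTxn takes as an argument, which is how the conditional (versioned) update of the transaction record is expressed. A minimal sketch of that flow, reusing the task calls exactly as above (the helper name and the 5000 ms lease are illustrative):

// Illustrative flow: create a transaction, then abort it using the id and version from its VersionedTransactionData.
static TxnStatus createAndAbort(StreamTransactionMetadataTasks tasks, String scope, String stream) {
    VersionedTransactionData txData = tasks.createTxn(scope, stream, 5000L, 0L, 1024 * 1024L).join().getKey();
    // the record's version is passed so the abort is conditional on the record not having changed
    return tasks.abortTxn(scope, stream, txData.getId(), txData.getVersion(), 0L).join();
}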