Use of io.pravega.shared.controller.event.AbortEvent in project pravega by pravega.
The class StreamTransactionMetadataTasksTest, method idempotentOperationsTests.
@Test(timeout = 10000)
public void idempotentOperationsTests() throws CheckpointStoreException, InterruptedException {
// Create mock writer objects.
EventStreamWriterMock<CommitEvent> commitWriter = new EventStreamWriterMock<>();
EventStreamWriterMock<AbortEvent> abortWriter = new EventStreamWriterMock<>();
EventStreamReader<CommitEvent> commitReader = commitWriter.getReader();
EventStreamReader<AbortEvent> abortReader = abortWriter.getReader();
// Create transaction tasks.
txnTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelperMock, executor, "host", GrpcAuthHelper.getDisabledAuthHelper());
txnTasks.initializeStreamWriters(commitWriter, abortWriter);
consumer = new ControllerService(kvtStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks, txnTasks, segmentHelperMock, executor, null, requestTracker);
final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
final StreamConfiguration configuration1 = StreamConfiguration.builder().scalingPolicy(policy1).build();
// Create stream and scope
Assert.assertEquals(Controller.CreateScopeStatus.Status.SUCCESS, consumer.createScope(SCOPE, 0L).join().getStatus());
Assert.assertEquals(Controller.CreateStreamStatus.Status.SUCCESS, streamMetadataTasks.createStream(SCOPE, STREAM, configuration1, System.currentTimeMillis(), 0L).join());
// Create 2 transactions
final long lease = 5000;
VersionedTransactionData txData1 = txnTasks.createTxn(SCOPE, STREAM, lease, 0L, 1024 * 1024L).join().getKey();
VersionedTransactionData txData2 = txnTasks.createTxn(SCOPE, STREAM, lease, 0L, 1024 * 1024L).join().getKey();
UUID tx1 = txData1.getId();
UUID tx2 = txData2.getId();
Version tx2Version = txData2.getVersion();
// Commit the first one
Assert.assertEquals(TxnStatus.COMMITTING, txnTasks.commitTxn(SCOPE, STREAM, tx1, 0L).join());
// Ensure that transaction state is COMMITTING.
assertEquals(TxnStatus.COMMITTING, streamStore.transactionStatus(SCOPE, STREAM, tx1, null, executor).join());
// Abort the second one
Assert.assertEquals(TxnStatus.ABORTING, txnTasks.abortTxn(SCOPE, STREAM, tx2, tx2Version, 0L).join());
// Ensure that the transaction state is ABORTING.
assertEquals(TxnStatus.ABORTING, streamStore.transactionStatus(SCOPE, STREAM, tx2, null, executor).join());
// Ensure that commit (resp. abort) transaction tasks are idempotent
// when transaction is in COMMITTING state (resp. ABORTING state).
assertEquals(TxnStatus.COMMITTING, txnTasks.commitTxn(SCOPE, STREAM, tx1, 0L).join());
assertEquals(TxnStatus.ABORTING, txnTasks.abortTxn(SCOPE, STREAM, tx2, null, 0L).join());
// Create commit and abort event processors.
BlockingQueue<CommitEvent> processedCommitEvents = new LinkedBlockingQueue<>();
BlockingQueue<AbortEvent> processedAbortEvents = new LinkedBlockingQueue<>();
createEventProcessor("commitRG", "commitStream", commitReader, commitWriter, () -> new ConcurrentEventProcessor<>(new CommitRequestHandler(streamStore, streamMetadataTasks, txnTasks, bucketStore, executor, processedCommitEvents), executor));
createEventProcessor("abortRG", "abortStream", abortReader, abortWriter, () -> new ConcurrentEventProcessor<>(new AbortRequestHandler(streamStore, streamMetadataTasks, executor, processedAbortEvents), executor));
// Wait until the commit event is processed and ensure that the txn state is COMMITTED.
CommitEvent commitEvent = processedCommitEvents.take();
assertEquals(0, commitEvent.getEpoch());
assertEquals(TxnStatus.COMMITTED, streamStore.transactionStatus(SCOPE, STREAM, tx1, null, executor).join());
// Wait until the abort event is processed and ensure that the txn state is ABORTED.
AbortEvent abortEvent = processedAbortEvents.take();
assertEquals(tx2, abortEvent.getTxid());
assertEquals(TxnStatus.ABORTED, streamStore.transactionStatus(SCOPE, STREAM, tx2, null, executor).join());
// Ensure that commit (resp. abort) transaction tasks are idempotent
// even after transaction is committed (resp. aborted)
assertEquals(TxnStatus.COMMITTED, txnTasks.commitTxn(SCOPE, STREAM, tx1, 0L).join());
assertEquals(TxnStatus.ABORTED, txnTasks.abortTxn(SCOPE, STREAM, tx2, null, 0L).join());
}
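The take() calls above block until an event has been processed, so a stalled event processor only surfaces through the JUnit timeout. A minimal sketch of a bounded wait that could be used with the same processedCommitEvents and processedAbortEvents queues (not from the Pravega sources; the helper name and timeout value are assumptions for illustration):
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import static org.junit.Assert.assertNotNull;

final class QueueTestUtil {
    // Polls the queue for up to timeoutMillis and fails the test if no event arrives in time.
    static <T> T awaitEvent(BlockingQueue<T> queue, long timeoutMillis) throws InterruptedException {
        T event = queue.poll(timeoutMillis, TimeUnit.MILLISECONDS);
        assertNotNull("No event was processed within " + timeoutMillis + " ms", event);
        return event;
    }
}
With such a helper, CommitEvent commitEvent = QueueTestUtil.awaitEvent(processedCommitEvents, 5000) would fail fast with a descriptive message instead of hanging until the test timeout.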
Use of io.pravega.shared.controller.event.AbortEvent in project pravega by pravega.
The class StreamTransactionMetadataTasksTest, method txnPingTest.
@Test(timeout = 10000)
public void txnPingTest() throws Exception {
// Create mock writer objects.
EventStreamWriterMock<CommitEvent> commitWriter = new EventStreamWriterMock<>();
EventStreamWriterMock<AbortEvent> abortWriter = new EventStreamWriterMock<>();
StreamMetadataStore streamStoreMock = spy(StreamStoreFactory.createZKStore(zkClient, executor));
// Create transaction tasks.
txnTasks = new StreamTransactionMetadataTasks(streamStoreMock, SegmentHelperMock.getSegmentHelperMock(), executor, "host", new GrpcAuthHelper(this.authEnabled, "secret", 300));
txnTasks.initializeStreamWriters(commitWriter, abortWriter);
final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
final StreamConfiguration configuration1 = StreamConfiguration.builder().scalingPolicy(policy1).build();
// Create stream and scope
streamStoreMock.createScope(SCOPE, null, executor).join();
streamStoreMock.createStream(SCOPE, STREAM, configuration1, System.currentTimeMillis(), null, executor).join();
streamStoreMock.setState(SCOPE, STREAM, State.ACTIVE, null, executor).join();
// Verify Ping transaction on a committing transaction.
Pair<VersionedTransactionData, List<StreamSegmentRecord>> txn = txnTasks.createTxn(SCOPE, STREAM, 10000L, 0L, 0L).join();
UUID txnId = txn.getKey().getId();
txnTasks.commitTxn(SCOPE, STREAM, txnId, 0L).join();
assertEquals(PingTxnStatus.Status.COMMITTED, txnTasks.pingTxn(SCOPE, STREAM, txnId, 10000L, 0L).join().getStatus());
// complete commit of transaction.
streamStoreMock.startCommitTransactions(SCOPE, STREAM, 100, null, executor).join();
val record = streamStoreMock.getVersionedCommittingTransactionsRecord(SCOPE, STREAM, null, executor).join();
streamStoreMock.completeCommitTransactions(SCOPE, STREAM, record, null, executor, Collections.emptyMap()).join();
// verify that transaction is removed from active txn
AssertExtensions.assertFutureThrows("Fetching Active Txn record should throw DNF", streamStoreMock.getTransactionData(SCOPE, STREAM, txnId, null, executor), e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException);
assertEquals(PingTxnStatus.Status.COMMITTED, txnTasks.pingTxn(SCOPE, STREAM, txnId, 10000L, 0L).join().getStatus());
// Verify Ping transaction on an aborting transaction.
txn = txnTasks.createTxn(SCOPE, STREAM, 10000L, 0L, 1024 * 1024L).join();
txnId = txn.getKey().getId();
txnTasks.abortTxn(SCOPE, STREAM, txnId, null, 0L).join();
assertEquals(PingTxnStatus.Status.ABORTED, txnTasks.pingTxn(SCOPE, STREAM, txnId, 10000L, 0L).join().getStatus());
// now complete abort so that the transaction is removed from active txn and added to completed txn.
streamStoreMock.abortTransaction(SCOPE, STREAM, txnId, null, executor).join();
AssertExtensions.assertFutureThrows("Fetching Active Txn record should throw DNF", streamStoreMock.getTransactionData(SCOPE, STREAM, txnId, null, executor), e -> Exceptions.unwrap(e) instanceof StoreException.DataNotFoundException);
assertEquals(PingTxnStatus.Status.ABORTED, txnTasks.pingTxn(SCOPE, STREAM, txnId, 10000L, 0L).join().getStatus());
// try with a non-existent transaction id
assertEquals(PingTxnStatus.Status.UNKNOWN, txnTasks.pingTxn(SCOPE, STREAM, UUID.randomUUID(), 10000L, 0L).join().getStatus());
// Verify max execution time.
txnTasks.setMaxExecutionTime(1L);
txn = txnTasks.createTxn(SCOPE, STREAM, 10000L, 0L, 1024 * 1024L).join();
UUID tid = txn.getKey().getId();
AssertExtensions.assertEventuallyEquals(PingTxnStatus.Status.MAX_EXECUTION_TIME_EXCEEDED, () -> txnTasks.pingTxn(SCOPE, STREAM, tid, 10000L, 0L).join().getStatus(), 10000L);
txnTasks.setMaxExecutionTime(Duration.ofDays(Config.MAX_TXN_EXECUTION_TIMEBOUND_DAYS).toMillis());
}
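The ping assertions above exercise four outcomes: COMMITTED and ABORTED for transactions that have already been resolved, UNKNOWN for an id that was never created, and MAX_EXECUTION_TIME_EXCEEDED once the transaction outlives the configured bound. The decision logic they imply can be summarized in a small pure function; this is an illustrative model only, and the enums and method names below are stand-ins rather than Pravega's PingTxnStatus or its implementation:
enum TxnState { OPEN, COMMITTING, COMMITTED, ABORTING, ABORTED, NOT_FOUND }
enum PingResult { OK, COMMITTED, ABORTED, UNKNOWN, MAX_EXECUTION_TIME_EXCEEDED }

final class PingDecisionSketch {
    // Decides the outcome of a ping request in this simplified model.
    static PingResult decide(TxnState state, long elapsedMillis, long maxExecutionTimeMillis) {
        if (state == TxnState.NOT_FOUND) {
            return PingResult.UNKNOWN;                      // e.g. pinging a random, never-created txn id
        }
        if (state == TxnState.COMMITTING || state == TxnState.COMMITTED) {
            return PingResult.COMMITTED;                    // renewing the lease is meaningless now
        }
        if (state == TxnState.ABORTING || state == TxnState.ABORTED) {
            return PingResult.ABORTED;
        }
        if (elapsedMillis > maxExecutionTimeMillis) {
            return PingResult.MAX_EXECUTION_TIME_EXCEEDED;  // txn ran past its overall execution bound
        }
        return PingResult.OK;                               // open txn within bounds: lease renewed
    }
}
In the test, setMaxExecutionTime(1L) drives the MAX_EXECUTION_TIME_EXCEEDED branch: even a freshly created transaction soon exceeds a 1 ms bound, regardless of the requested lease.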
Use of io.pravega.shared.controller.event.AbortEvent in project pravega by pravega.
The class StreamTransactionMetadataTasksTest, method commitAbortTests.
@Test(timeout = 5000)
@SuppressWarnings("unchecked")
public void commitAbortTests() {
// Create mock writer objects.
final List<CompletableFuture<Void>> commitWriterResponses = getWriteResultSequence(5);
final List<CompletableFuture<Void>> abortWriterResponses = getWriteResultSequence(5);
EventStreamWriter<CommitEvent> commitWriter = Mockito.mock(EventStreamWriter.class);
Mockito.when(commitWriter.writeEvent(anyString(), any())).thenAnswer(new SequenceAnswer<>(commitWriterResponses));
EventStreamWriter<AbortEvent> abortWriter = Mockito.mock(EventStreamWriter.class);
Mockito.when(abortWriter.writeEvent(anyString(), any())).thenAnswer(new SequenceAnswer<>(abortWriterResponses));
// Create transaction tasks.
txnTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelperMock, executor, "host", GrpcAuthHelper.getDisabledAuthHelper());
txnTasks.initializeStreamWriters(commitWriter, abortWriter);
// Create ControllerService.
consumer = new ControllerService(kvtStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks, txnTasks, segmentHelperMock, executor, null, requestTracker);
final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
final StreamConfiguration configuration1 = StreamConfiguration.builder().scalingPolicy(policy1).build();
// Create stream and scope
Assert.assertEquals(Controller.CreateScopeStatus.Status.SUCCESS, consumer.createScope(SCOPE, 0L).join().getStatus());
Assert.assertEquals(Controller.CreateStreamStatus.Status.SUCCESS, streamMetadataTasks.createStream(SCOPE, STREAM, configuration1, 0, 0L).join());
// Create 2 transactions
final long lease = 5000;
VersionedTransactionData txData1 = txnTasks.createTxn(SCOPE, STREAM, lease, 0L, 1024 * 1024L).join().getKey();
VersionedTransactionData txData2 = txnTasks.createTxn(SCOPE, STREAM, lease, 0L, 1024 * 1024L).join().getKey();
// Commit the first one
TxnStatus status = txnTasks.commitTxn(SCOPE, STREAM, txData1.getId(), 0L).join();
Assert.assertEquals(TxnStatus.COMMITTING, status);
// Abort the second one
status = txnTasks.abortTxn(SCOPE, STREAM, txData2.getId(), txData2.getVersion(), 0L).join();
Assert.assertEquals(TxnStatus.ABORTING, status);
}
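SequenceAnswer is a helper defined elsewhere in the Pravega test sources; the listing above only shows it being used. A minimal sketch of the idea, reimplemented here for illustration on top of Mockito's Answer interface (the class name and the repeat-last-result behaviour are assumptions of this sketch):
import java.util.List;
import java.util.concurrent.atomic.AtomicInteger;
import org.mockito.invocation.InvocationOnMock;
import org.mockito.stubbing.Answer;

class SequenceAnswerSketch<T> implements Answer<T> {
    private final List<T> results;                 // pre-canned results, assumed non-empty
    private final AtomicInteger index = new AtomicInteger(0);

    SequenceAnswerSketch(List<T> results) {
        this.results = results;
    }

    @Override
    public T answer(InvocationOnMock invocation) {
        // Return the next result in order; once exhausted, keep returning the last one.
        int i = Math.min(index.getAndIncrement(), results.size() - 1);
        return results.get(i);
    }
}
Wired up as above, each commitWriter.writeEvent(...) call then resolves to the next CompletableFuture produced by getWriteResultSequence(5).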
Use of io.pravega.shared.controller.event.AbortEvent in project pravega by pravega.
The class StreamTransactionMetadataTasksTest, method partialTxnCreationTest.
@Test(timeout = 10000)
public void partialTxnCreationTest() {
// Create mock writer objects.
EventStreamWriterMock<CommitEvent> commitWriter = new EventStreamWriterMock<>();
EventStreamWriterMock<AbortEvent> abortWriter = new EventStreamWriterMock<>();
// Create transaction tasks.
txnTasks = new StreamTransactionMetadataTasks(streamStore, SegmentHelperMock.getFailingSegmentHelperMock(), executor, "host", new GrpcAuthHelper(this.authEnabled, "secret", 600));
txnTasks.initializeStreamWriters(commitWriter, abortWriter);
// Create ControllerService.
consumer = new ControllerService(kvtStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks, txnTasks, segmentHelperMock, executor, null, requestTracker);
final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
final StreamConfiguration configuration1 = StreamConfiguration.builder().scalingPolicy(policy1).build();
// Create stream and scope
Assert.assertEquals(Controller.CreateScopeStatus.Status.SUCCESS, consumer.createScope(SCOPE, 0L).join().getStatus());
Assert.assertEquals(Controller.CreateStreamStatus.Status.SUCCESS, streamMetadataTasks.createStream(SCOPE, STREAM, configuration1, 0, 0L).join());
// Create partial transaction
final long lease = 10000;
AssertExtensions.assertFutureThrows("Transaction creation fails, although a new txn id gets added to the store", txnTasks.createTxn(SCOPE, STREAM, lease, 0L, 1024 * 1024L), e -> e instanceof RuntimeException);
// Ensure that exactly one transaction is active on the stream.
Set<UUID> txns = streamStore.getActiveTxns(SCOPE, STREAM, null, executor).join().keySet();
assertEquals(1, txns.size());
// Ensure that transaction state is OPEN.
UUID txn1 = txns.stream().findFirst().get();
assertEquals(TxnStatus.OPEN, streamStore.transactionStatus(SCOPE, STREAM, txn1, null, executor).join());
// Ensure that timeout service knows about the transaction.
assertTrue(txnTasks.getTimeoutService().containsTxn(SCOPE, STREAM, txn1));
}
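AssertExtensions.assertFutureThrows comes from Pravega's test utilities. For readers without that dependency, a similar check can be expressed with JDK and JUnit primitives alone; the helper below is an illustrative sketch (its name and signature are assumptions), relying on the fact that CompletableFuture.join() wraps failures in a CompletionException:
import java.util.concurrent.CompletableFuture;
import java.util.concurrent.CompletionException;
import java.util.function.Predicate;
import static org.junit.Assert.assertTrue;
import static org.junit.Assert.fail;

final class FutureAssertSketch {
    static void assertFails(CompletableFuture<?> future, Predicate<Throwable> causeMatcher) {
        try {
            future.join();
            fail("Expected the future to complete exceptionally");
        } catch (CompletionException e) {
            // join() wraps the original failure; unwrap and match the cause.
            Throwable cause = e.getCause() != null ? e.getCause() : e;
            assertTrue("Unexpected failure cause: " + cause, causeMatcher.test(cause));
        }
    }
}
A call mirroring the assertion above would then read FutureAssertSketch.assertFails(txnTasks.createTxn(SCOPE, STREAM, lease, 0L, 1024 * 1024L), e -> e instanceof RuntimeException).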
Use of io.pravega.shared.controller.event.AbortEvent in project pravega by pravega.
The class StreamTransactionMetadataTasksTest, method txnCreationTest.
@Test(timeout = 10000)
public void txnCreationTest() {
// Create mock writer objects.
EventStreamWriterMock<CommitEvent> commitWriter = new EventStreamWriterMock<>();
EventStreamWriterMock<AbortEvent> abortWriter = new EventStreamWriterMock<>();
StreamMetadataStore streamStoreMock = spy(StreamStoreFactory.createZKStore(zkClient, executor));
// Create transaction tasks.
txnTasks = new StreamTransactionMetadataTasks(streamStoreMock, SegmentHelperMock.getSegmentHelperMock(), executor, "host", new GrpcAuthHelper(this.authEnabled, "secret", 600));
txnTasks.initializeStreamWriters(commitWriter, abortWriter);
final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
final StreamConfiguration configuration1 = StreamConfiguration.builder().scalingPolicy(policy1).build();
// Create stream and scope
streamStoreMock.createScope(SCOPE, null, executor).join();
streamStoreMock.createStream(SCOPE, STREAM, configuration1, System.currentTimeMillis(), null, executor).join();
streamStoreMock.setState(SCOPE, STREAM, State.ACTIVE, null, executor).join();
// Mock streamStoreMock.generateTransactionId to throw a retryable exception on the first
// invocation; the task retries and obtains an id on a subsequent attempt.
doAnswer(new Answer<CompletableFuture<UUID>>() {
    AtomicInteger count = new AtomicInteger(0);

    @Override
    public CompletableFuture<UUID> answer(InvocationOnMock invocation) throws Throwable {
        // First invocation: fail with a write conflict.
        if (count.getAndIncrement() == 0) {
            return Futures.failedFuture(StoreException.create(StoreException.Type.WRITE_CONFLICT, "write conflict on counter update"));
        }
        // Subsequent invocations: delegate to the real method.
        @SuppressWarnings("unchecked")
        CompletableFuture<UUID> future = (CompletableFuture<UUID>) invocation.callRealMethod();
        return future;
    }
}).when(streamStoreMock).generateTransactionId(eq(SCOPE), eq(STREAM), any(), any());
doAnswer(new Answer<CompletableFuture<VersionedTransactionData>>() {
    AtomicInteger count = new AtomicInteger(0);

    @Override
    public CompletableFuture<VersionedTransactionData> answer(InvocationOnMock invocation) throws Throwable {
        // First invocation: fail with a data-not-found error.
        if (count.getAndIncrement() == 0) {
            return Futures.failedFuture(StoreException.create(StoreException.Type.DATA_NOT_FOUND, "Epoch not found"));
        }
        // Subsequent invocations: delegate to the real method.
        @SuppressWarnings("unchecked")
        CompletableFuture<VersionedTransactionData> future = (CompletableFuture<VersionedTransactionData>) invocation.callRealMethod();
        return future;
    }
}).when(streamStoreMock).createTransaction(any(), any(), any(), anyLong(), anyLong(), any(), any());
Pair<VersionedTransactionData, List<StreamSegmentRecord>> txn = txnTasks.createTxn(SCOPE, STREAM, 10000L, 0L, 1024 * 1024L).join();
// verify that generate transaction id is called 3 times
verify(streamStoreMock, times(3)).generateTransactionId(any(), any(), any(), any());
// verify that create transaction is called 2 times
verify(streamStoreMock, times(2)).createTransaction(any(), any(), any(), anyLong(), anyLong(), any(), any());
// verify that the generated txn id encodes epoch 0 in the most significant bits and counter value 2 in the least significant bits
UUID txnId = txn.getKey().getId();
assertEquals(0, (int) (txnId.getMostSignificantBits() >> 32));
assertEquals(2, txnId.getLeastSignificantBits());
}
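The last two assertions depend on how the controller packs information into the 128-bit transaction id: the stream epoch is encoded in the upper 32 bits of the most significant half, while the low bits carry a monotonically increasing counter (for small counter values, simply the least significant half). The sketch below is a simplified model of that layout, meant to make the expected values 0 and 2 easier to read; it is not the controller's actual id generator:
import java.util.UUID;

final class TxnIdSketch {
    // Builds an id with the epoch in the top 32 bits of the MSB and the counter as the LSB.
    static UUID encode(int epoch, long counter) {
        long msb = ((long) epoch) << 32;
        return new UUID(msb, counter);
    }

    static int epochOf(UUID txnId) {
        return (int) (txnId.getMostSignificantBits() >> 32);
    }

    static long counterOf(UUID txnId) {
        return txnId.getLeastSignificantBits();
    }
}
In this model, encode(0, 2) yields an id for which epochOf(...) is 0 and counterOf(...) is 2, matching the two assertEquals checks above.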