use of io.pravega.shared.controller.event.AbortEvent in project pravega by pravega.
the class StreamTransactionMetadataTasks method sealTxnBody.
/**
* Seals a txn and transitions it to COMMITTING (resp. ABORTING) state if commit param is true (resp. false).
*
* Post-condition:
* 1. If seal completes successfully, then
* (a) txn state is COMMITTING/ABORTING,
* (b) CommitEvent/AbortEvent is present in the commit stream/abort stream,
* (c) txn is removed from host-txn index,
* (d) txn is removed from the timeout service.
*
* 2. If process fails after transitioning txn to COMMITTING/ABORTING state, but before responding to client, then
* since txn is present in the host-txn index, some other controller process shall put CommitEvent/AbortEvent to
* commit stream/abort stream.
*
* @param host host id. It is different from hostId iff invoked from TxnSweeper for aborting orphaned txn.
* @param scope scope name.
* @param stream stream name.
* @param commit boolean indicating whether to commit txn.
* @param txnId txn id.
* @param version expected version of txn node in store.
* @param writerId id of the writer committing the transaction ("" when not supplied).
* @param timestamp commit timestamp supplied by the writer (Long.MIN_VALUE when not supplied).
* @param ctx context.
* @return Txn status after sealing it.
*/
CompletableFuture<TxnStatus> sealTxnBody(final String host, final String scope, final String stream,
                                         final boolean commit, final UUID txnId, final Version version,
                                         final String writerId, final long timestamp, final OperationContext ctx) {
    Preconditions.checkNotNull(ctx, "Operation context cannot be null");
    long requestId = ctx.getRequestId();
    TxnResource resource = new TxnResource(scope, stream, txnId);
    Optional<Version> versionOpt = Optional.ofNullable(version);

    // Step 1. Add txn to current host's index, if it is not already present.
    CompletableFuture<Void> addIndex = host.equals(hostId) && !timeoutService.containsTxn(scope, stream, txnId) ?
            // The txn version in the index does not matter, because if the update is successful,
            // then txn would no longer be open.
            streamMetadataStore.addTxnToIndex(hostId, resource, version) :
            CompletableFuture.completedFuture(null);
    addIndex.whenComplete((v, e) -> {
        if (e != null) {
            log.debug(requestId, "Txn={}, failed adding txn to host-txn index of host={}", txnId, hostId);
        } else {
            log.debug(requestId, "Txn={}, added txn to host-txn index of host={}", txnId, hostId);
        }
    });

    // Step 2. Seal txn.
    CompletableFuture<AbstractMap.SimpleEntry<TxnStatus, Integer>> sealFuture = addIndex.thenComposeAsync(x ->
            streamMetadataStore.sealTransaction(scope, stream, txnId, commit, versionOpt, writerId, timestamp, ctx, executor),
            executor).whenComplete((v, e) -> {
        if (e != null) {
            log.debug(requestId, "Txn={}, failed sealing txn", txnId);
        } else {
            log.debug(requestId, "Txn={}, sealed successfully, commit={}", txnId, commit);
        }
    });

    // Step 3. Write event to the corresponding stream.
    return sealFuture.thenComposeAsync(pair -> {
        TxnStatus status = pair.getKey();
        switch (status) {
            case COMMITTING:
                return writeCommitEvent(scope, stream, pair.getValue(), txnId, status, requestId);
            case ABORTING:
                return writeAbortEvent(scope, stream, pair.getValue(), txnId, status, requestId);
            case ABORTED:
            case COMMITTED:
                return CompletableFuture.completedFuture(status);
            case OPEN:
            case UNKNOWN:
            default:
                // Not reachable after a successful sealTransaction call; otherwise an
                // exception would be thrown.
                return CompletableFuture.completedFuture(status);
        }
    }, executor).thenComposeAsync(status -> {
        // Step 4. Remove txn from timeoutService, and from the index.
        timeoutService.removeTxn(scope, stream, txnId);
        log.debug(requestId, "Txn={}, removed from timeout service", txnId);
        return streamMetadataStore.removeTxnFromIndex(host, resource, true).whenComplete((v, e) -> {
            if (e != null) {
                log.debug(requestId, "Txn={}, failed removing txn from host-txn index of host={}", txnId, hostId);
            } else {
                log.debug(requestId, "Txn={}, removed txn from host-txn index of host={}", txnId, hostId);
            }
        }).thenApply(x -> status);
    }, executor);
}
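For context, sealTxnBody is reached through the public commit/abort task methods on this class. A minimal sketch of such a caller, purely illustrative and not the actual Pravega commitTxn implementation, might look like this; the empty writer id and Long.MIN_VALUE timestamp mirror the "no watermark" values used in the tests further down.
// Illustrative caller sketch only; the real commitTxn/abortTxn wrappers differ in details
// (context creation, retries, metrics).
public CompletableFuture<TxnStatus> commitTxnSketch(final String scope, final String stream,
                                                    final UUID txnId, final OperationContext ctx) {
    // commit == true seals the txn into COMMITTING and emits a CommitEvent;
    // commit == false would seal it into ABORTING and emit an AbortEvent instead.
    return sealTxnBody(hostId, scope, stream, true, txnId, null, "", Long.MIN_VALUE, ctx);
}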
use of io.pravega.shared.controller.event.AbortEvent in project pravega by pravega.
the class ControllerEventProcessorsTest method testEventKey.
@Test(timeout = 10000)
public void testEventKey() {
    UUID txid = UUID.randomUUID();
    String scope = "test";
    String stream = "test";
    AbortEvent abortEvent = new AbortEvent(scope, stream, 0, txid, 21L);
    CommitEvent commitEvent = new CommitEvent(scope, stream, 0);
    assertEquals(abortEvent.getKey(), "test/test");
    assertEquals(commitEvent.getKey(), "test/test");
}
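Both event types route on the scoped stream name, which is what the two assertions above pin down. A minimal, hypothetical event type following the same keying convention (not the actual Pravega CommitEvent/AbortEvent classes) could be written as:
// Hypothetical type illustrating the "scope/stream" routing key checked above; events for the
// same stream share a key, so they are delivered in order to a single processor.
public class TxnEventSketch {
    private final String scope;
    private final String stream;

    public TxnEventSketch(String scope, String stream) {
        this.scope = scope;
        this.stream = stream;
    }

    public String getKey() {
        return scope + "/" + stream;
    }
}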
use of io.pravega.shared.controller.event.AbortEvent in project pravega by pravega.
the class StreamTransactionMetadataTasksTest method failOverTests.
@Test(timeout = 60000)
public void failOverTests() throws Exception {
    // Create mock writer objects.
    EventStreamWriterMock<CommitEvent> commitWriter = new EventStreamWriterMock<>();
    EventStreamWriterMock<AbortEvent> abortWriter = new EventStreamWriterMock<>();
    EventStreamReader<CommitEvent> commitReader = commitWriter.getReader();
    EventStreamReader<AbortEvent> abortReader = abortWriter.getReader();
    txnTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelperMock, executor, "host",
            GrpcAuthHelper.getDisabledAuthHelper());
    txnTasks.initializeStreamWriters(commitWriter, abortWriter);
    consumer = new ControllerService(kvtStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks,
            txnTasks, segmentHelperMock, executor, null, requestTracker);

    // Create test scope and stream.
    final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration1 = StreamConfiguration.builder().scalingPolicy(policy1).build();
    Assert.assertEquals(Controller.CreateScopeStatus.Status.SUCCESS, consumer.createScope(SCOPE, 0L).join().getStatus());
    Assert.assertEquals(Controller.CreateStreamStatus.Status.SUCCESS,
            streamMetadataTasks.createStream(SCOPE, STREAM, configuration1, System.currentTimeMillis(), 0L).join());

    // Set up txn task for creating transactions from a failedHost.
    @Cleanup
    StreamTransactionMetadataTasks failedTxnTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelperMock,
            executor, "failedHost", GrpcAuthHelper.getDisabledAuthHelper());
    failedTxnTasks.initializeStreamWriters(new EventStreamWriterMock<>(), new EventStreamWriterMock<>());

    // Create 3 transactions from failedHost.
    VersionedTransactionData tx1 = failedTxnTasks.createTxn(SCOPE, STREAM, 10000, 0L, 0L).join().getKey();
    VersionedTransactionData tx2 = failedTxnTasks.createTxn(SCOPE, STREAM, 10000, 0L, 0L).join().getKey();
    VersionedTransactionData tx3 = failedTxnTasks.createTxn(SCOPE, STREAM, 10000, 0L, 0L).join().getKey();
    VersionedTransactionData tx4 = failedTxnTasks.createTxn(SCOPE, STREAM, 10000, 0L, 0L).join().getKey();

    // Ping another txn from failedHost.
    PingTxnStatus pingStatus = failedTxnTasks.pingTxn(SCOPE, STREAM, tx4.getId(), 10000, 0L).join();
    VersionedTransactionData tx4get = streamStore.getTransactionData(SCOPE, STREAM, tx4.getId(), null, executor).join();

    // Validate versions of all txn.
    Assert.assertEquals(0, tx1.getVersion().asIntVersion().getIntValue());
    Assert.assertEquals(0, tx2.getVersion().asIntVersion().getIntValue());
    Assert.assertEquals(0, tx3.getVersion().asIntVersion().getIntValue());
    Assert.assertEquals(1, tx4get.getVersion().asIntVersion().getIntValue());
    Assert.assertEquals(PingTxnStatus.Status.OK, pingStatus.getStatus());

    // Validate the txn index.
    Assert.assertEquals(1, streamStore.listHostsOwningTxn().join().size());

    // Change state of one txn to COMMITTING.
    TxnStatus txnStatus2 = streamStore.sealTransaction(SCOPE, STREAM, tx2.getId(), true, Optional.empty(), "",
            Long.MIN_VALUE, null, executor).thenApply(AbstractMap.SimpleEntry::getKey).join();
    Assert.assertEquals(TxnStatus.COMMITTING, txnStatus2);

    // Change state of another txn to ABORTING.
    TxnStatus txnStatus3 = streamStore.sealTransaction(SCOPE, STREAM, tx3.getId(), false, Optional.empty(), "",
            Long.MIN_VALUE, null, executor).thenApply(AbstractMap.SimpleEntry::getKey).join();
    Assert.assertEquals(TxnStatus.ABORTING, txnStatus3);

    // Create transaction tasks for sweeping txns from failedHost.
    txnTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelperMock, executor, "host",
            GrpcAuthHelper.getDisabledAuthHelper());
    TxnSweeper txnSweeper = new TxnSweeper(streamStore, txnTasks, 100, executor);

    // Before the writers are initialized, txnSweeper.sweepFailedProcesses fails with IllegalStateException.
    AssertExtensions.assertFutureThrows("IllegalStateException before initialization",
            txnSweeper.sweepFailedProcesses(() -> Collections.singleton("host")),
            ex -> ex instanceof IllegalStateException);

    // Initialize stream writers.
    txnTasks.initializeStreamWriters(commitWriter, abortWriter);

    // Validate that txnTasks is ready.
    assertTrue(txnTasks.isReady());

    // Sweep txns that were being managed by failedHost.
    txnSweeper.sweepFailedProcesses(() -> Collections.singleton("host")).join();

    // Validate that sweeping completes correctly.
    Set<String> listOfHosts = streamStore.listHostsOwningTxn().join();
    Assert.assertEquals(1, listOfHosts.size());
    Assert.assertTrue(listOfHosts.contains("host"));
    Assert.assertEquals(TxnStatus.OPEN, streamStore.transactionStatus(SCOPE, STREAM, tx1.getId(), null, executor).join());
    Assert.assertEquals(TxnStatus.COMMITTING, streamStore.transactionStatus(SCOPE, STREAM, tx2.getId(), null, executor).join());
    Assert.assertEquals(TxnStatus.ABORTING, streamStore.transactionStatus(SCOPE, STREAM, tx3.getId(), null, executor).join());
    Assert.assertEquals(TxnStatus.OPEN, streamStore.transactionStatus(SCOPE, STREAM, tx4.getId(), null, executor).join());
    VersionedTransactionData txnData = streamStore.getTransactionData(SCOPE, STREAM, tx1.getId(), null, executor).join();
    Assert.assertEquals(1, txnData.getVersion().asIntVersion().getIntValue());
    txnData = streamStore.getTransactionData(SCOPE, STREAM, tx4.getId(), null, executor).join();
    Assert.assertEquals(2, txnData.getVersion().asIntVersion().getIntValue());

    // Create commit and abort event processors.
    BlockingQueue<CommitEvent> processedCommitEvents = new LinkedBlockingQueue<>();
    BlockingQueue<AbortEvent> processedAbortEvents = new LinkedBlockingQueue<>();
    createEventProcessor("commitRG", "commitStream", commitReader, commitWriter,
            () -> new ConcurrentEventProcessor<>(new CommitRequestHandler(streamStore, streamMetadataTasks, txnTasks,
                    bucketStore, executor, processedCommitEvents), executor));
    createEventProcessor("abortRG", "abortStream", abortReader, abortWriter,
            () -> new ConcurrentEventProcessor<>(new AbortRequestHandler(streamStore, streamMetadataTasks, executor,
                    processedAbortEvents), executor));

    // Wait until the commit event is processed and ensure that the txn state is COMMITTED.
    CommitEvent commitEvent = processedCommitEvents.take();
    assertEquals(tx2.getEpoch(), commitEvent.getEpoch());
    assertEquals(TxnStatus.COMMITTED, streamStore.transactionStatus(SCOPE, STREAM, tx2.getId(), null, executor).join());

    // Wait until 3 abort events are processed and ensure that the txn state is ABORTED.
    Predicate<AbortEvent> predicate = event -> event.getTxid().equals(tx1.getId())
            || event.getTxid().equals(tx3.getId()) || event.getTxid().equals(tx4.getId());
    AbortEvent abortEvent1 = processedAbortEvents.take();
    assertTrue(predicate.test(abortEvent1));
    AbortEvent abortEvent2 = processedAbortEvents.take();
    assertTrue(predicate.test(abortEvent2));
    AbortEvent abortEvent3 = processedAbortEvents.take();
    assertTrue(predicate.test(abortEvent3));
    assertEquals(TxnStatus.ABORTED, streamStore.transactionStatus(SCOPE, STREAM, tx1.getId(), null, executor).join());
    assertEquals(TxnStatus.ABORTED, streamStore.transactionStatus(SCOPE, STREAM, tx3.getId(), null, executor).join());
    assertEquals(TxnStatus.ABORTED, streamStore.transactionStatus(SCOPE, STREAM, tx4.getId(), null, executor).join());
}
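The end states asserted above can be read as the combined effect of the sweep and of lease expiry: COMMITTING/ABORTING transactions get their commit/abort events re-emitted, while OPEN transactions are merely taken over by the new host and are aborted later by the timeout service, which is why tx1 and tx4 only reach ABORTED once their leases run out. The following is a hedged, conceptual sketch of that per-transaction decision; TxnRecord, reissueEvent and takeOverOwnership are hypothetical stand-ins, not the real TxnSweeper API.
// Conceptual sketch only; TxnRecord, reissueEvent and takeOverOwnership are hypothetical
// stand-ins and not part of the actual TxnSweeper or stream-store API.
private CompletableFuture<Void> sweepOneTxnSketch(TxnRecord txn) {
    switch (txn.getStatus()) {
        case COMMITTING:
            // The failed host sealed the txn but may never have written the CommitEvent, so write it again.
            return reissueEvent(new CommitEvent(txn.getScope(), txn.getStream(), txn.getEpoch()));
        case ABORTING:
            // Likewise for the AbortEvent.
            return reissueEvent(new AbortEvent(txn.getScope(), txn.getStream(), txn.getEpoch(), txn.getId(), 0L));
        case OPEN:
        default:
            // Take over ownership: move the txn into this host's index and timeout service.
            // The lease timer, not the sweep itself, aborts it later if the client never returns.
            return takeOverOwnership(txn);
    }
}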
use of io.pravega.shared.controller.event.AbortEvent in project pravega by pravega.
the class StreamTransactionMetadataTasksTest method writerInitializationTest.
@Test(timeout = 10000)
public void writerInitializationTest() throws Exception {
    EventStreamWriterMock<CommitEvent> commitWriter = new EventStreamWriterMock<>();
    EventStreamWriterMock<AbortEvent> abortWriter = new EventStreamWriterMock<>();
    StreamMetadataStore streamStoreMock = spy(StreamStoreFactory.createZKStore(zkClient, executor));
    final long leasePeriod = 5000;

    // region close before initialize
    txnTasks = new StreamTransactionMetadataTasks(streamStoreMock, SegmentHelperMock.getSegmentHelperMock(), executor,
            "host", new GrpcAuthHelper(this.authEnabled, "secret", 300));
    CompletableFuture<Void> future = txnTasks.writeCommitEvent(new CommitEvent("scope", "stream", 0));
    assertFalse(future.isDone());
    txnTasks.close();
    AssertExtensions.assertFutureThrows("", future, e -> Exceptions.unwrap(e) instanceof CancellationException);
    // endregion

    // region test initialize writers with client factory
    txnTasks = new StreamTransactionMetadataTasks(streamStoreMock, SegmentHelperMock.getSegmentHelperMock(), executor,
            "host", new GrpcAuthHelper(this.authEnabled, "secret", 300));
    future = txnTasks.writeCommitEvent(new CommitEvent("scope", "stream", 0));
    EventStreamClientFactory cfMock = mock(EventStreamClientFactory.class);
    ControllerEventProcessorConfig eventProcConfigMock = mock(ControllerEventProcessorConfig.class);
    String commitStream = "commitStream";
    doAnswer(x -> commitStream).when(eventProcConfigMock).getCommitStreamName();
    doAnswer(x -> commitWriter).when(cfMock).createEventWriter(eq(commitStream), any(), any());
    String abortStream = "abortStream";
    doAnswer(x -> abortStream).when(eventProcConfigMock).getAbortStreamName();
    doAnswer(x -> abortWriter).when(cfMock).createEventWriter(eq(abortStream), any(), any());
    // The future should not have completed, as we have not initialized the writers.
    assertFalse(future.isDone());
    // Initialize the writers; the pending write future should now complete.
    txnTasks.initializeStreamWriters(cfMock, eventProcConfigMock);
    assertTrue(Futures.await(future));
    txnTasks.close();
    // endregion

    // region test method calls and initialize writers with direct writer set up method call
    txnTasks = new StreamTransactionMetadataTasks(streamStoreMock, SegmentHelperMock.getSegmentHelperMock(), executor,
            "host", new GrpcAuthHelper(this.authEnabled, "secret", 300));
    streamStore.createScope(SCOPE, null, executor).join();
    streamStore.createStream(SCOPE, STREAM, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build(),
            1L, null, executor).join();
    streamStore.setState(SCOPE, STREAM, State.ACTIVE, null, executor).join();
    CompletableFuture<Pair<VersionedTransactionData, List<StreamSegmentRecord>>> createFuture =
            txnTasks.createTxn(SCOPE, STREAM, leasePeriod, 0L, 0L);
    // Create and ping transactions do not wait for writer initialization; they complete immediately.
    createFuture.join();
    assertTrue(Futures.await(createFuture));
    UUID txnId = createFuture.join().getKey().getId();
    CompletableFuture<PingTxnStatus> pingFuture = txnTasks.pingTxn(SCOPE, STREAM, txnId, leasePeriod, 0L);
    assertTrue(Futures.await(pingFuture));
    CompletableFuture<TxnStatus> commitFuture = txnTasks.commitTxn(SCOPE, STREAM, txnId, 0L);
    assertFalse(commitFuture.isDone());
    txnTasks.initializeStreamWriters(commitWriter, abortWriter);
    assertTrue(Futures.await(commitFuture));
    UUID txnId2 = txnTasks.createTxn(SCOPE, STREAM, leasePeriod, 0L, 1024 * 1024L).join().getKey().getId();
    assertTrue(Futures.await(txnTasks.abortTxn(SCOPE, STREAM, txnId2, null, 0L)));
}
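The ordering constraint this test pins down (commit/abort writes stay pending until writers are initialized, and fail with CancellationException if the tasks are closed first) is commonly implemented by parking the writer behind a CompletableFuture and composing every write onto it. The class below is a small sketch of that pattern under that assumption; its names are illustrative and are not fields or methods of StreamTransactionMetadataTasks.
// Illustrative sketch of the "defer writes until a writer is supplied" pattern.
class DeferredEventWriter<T> {
    private final CompletableFuture<EventStreamWriter<T>> writerFuture = new CompletableFuture<>();

    // Analogous to initializeStreamWriters(...): releases all pending writes.
    void setWriter(EventStreamWriter<T> writer) {
        writerFuture.complete(writer);
    }

    // The returned future stays incomplete until a writer is supplied, matching
    // assertFalse(future.isDone()) before initialization in the test above.
    CompletableFuture<Void> write(String routingKey, T event) {
        return writerFuture.thenCompose(writer -> writer.writeEvent(routingKey, event));
    }

    // Failing the inner future on close surfaces (after unwrapping) as the
    // CancellationException checked in the "close before initialize" region.
    void close() {
        writerFuture.completeExceptionally(new CancellationException("writer never initialized"));
    }
}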
use of io.pravega.shared.controller.event.AbortEvent in project pravega by pravega.
the class ControllerEventProcessorTest method testAbortEventProcessor.
@Test(timeout = 10000)
public void testAbortEventProcessor() {
    UUID txnId = streamStore.generateTransactionId(SCOPE, STREAM, null, executor).join();
    VersionedTransactionData txnData = streamStore.createTransaction(SCOPE, STREAM, txnId, 10000, 10000, null, executor).join();
    Assert.assertNotNull(txnData);
    checkTransactionState(SCOPE, STREAM, txnId, TxnStatus.OPEN);

    streamStore.sealTransaction(SCOPE, STREAM, txnData.getId(), false, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();
    checkTransactionState(SCOPE, STREAM, txnData.getId(), TxnStatus.ABORTING);

    AbortRequestHandler abortRequestHandler = new AbortRequestHandler(streamStore, streamMetadataTasks, executor);
    abortRequestHandler.processEvent(new AbortEvent(SCOPE, STREAM, txnData.getEpoch(), txnData.getId(), 11L)).join();
    checkTransactionState(SCOPE, STREAM, txnData.getId(), TxnStatus.ABORTED);
}
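checkTransactionState is a helper defined elsewhere in ControllerEventProcessorTest. Judging only from how it is called above, a minimal version would presumably look like the sketch below (an assumption, not the exact helper from the test class):
// Presumed shape of the helper: assert that the store reports the expected transaction status.
private void checkTransactionState(String scope, String stream, UUID txnId, TxnStatus expected) {
    TxnStatus actual = streamStore.transactionStatus(scope, stream, txnId, null, executor).join();
    Assert.assertEquals(expected, actual);
}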