Use of io.pravega.controller.server.eventProcessor.requesthandlers.CommitRequestHandler in project pravega by pravega.
From class ControllerEventProcessors, method initialize().
private void initialize() {
    // region Create commit event processor
    EventProcessorGroupConfig commitReadersConfig = EventProcessorGroupConfigImpl.builder()
            .streamName(config.getCommitStreamName()).readerGroupName(config.getCommitReaderGroupName())
            .eventProcessorCount(config.getCommitReaderGroupSize()).checkpointConfig(CheckpointConfig.none())
            .build();
    EventProcessorConfig<CommitEvent> commitConfig = EventProcessorConfig.<CommitEvent>builder()
            .config(commitReadersConfig).decider(ExceptionHandler.DEFAULT_EXCEPTION_HANDLER)
            .serializer(COMMIT_EVENT_SERIALIZER)
            .supplier(() -> new ConcurrentEventProcessor<>(commitRequestHandler, executor))
            .minRebalanceIntervalMillis(rebalanceIntervalMillis).build();
    log.debug("Creating commit event processors");
    Retry.indefinitelyWithExpBackoff(DELAY, MULTIPLIER, MAX_DELAY,
            e -> log.warn("Error creating commit event processor group", e))
         .run(() -> {
             commitEventProcessors = system.createEventProcessorGroup(commitConfig, checkpointStore, rebalanceExecutor);
             return null;
         });
    // endregion
    // region Create abort event processor
    EventProcessorGroupConfig abortReadersConfig = EventProcessorGroupConfigImpl.builder()
            .streamName(config.getAbortStreamName()).readerGroupName(config.getAbortReaderGroupName())
            .eventProcessorCount(config.getAbortReaderGroupSize()).checkpointConfig(CheckpointConfig.none())
            .build();
    EventProcessorConfig<AbortEvent> abortConfig = EventProcessorConfig.<AbortEvent>builder()
            .config(abortReadersConfig).decider(ExceptionHandler.DEFAULT_EXCEPTION_HANDLER)
            .serializer(ABORT_EVENT_SERIALIZER)
            .supplier(() -> new ConcurrentEventProcessor<>(abortRequestHandler, executor))
            .minRebalanceIntervalMillis(rebalanceIntervalMillis).build();
    log.debug("Creating abort event processors");
    Retry.indefinitelyWithExpBackoff(DELAY, MULTIPLIER, MAX_DELAY,
            e -> log.warn("Error creating abort event processor group", e))
         .run(() -> {
             abortEventProcessors = system.createEventProcessorGroup(abortConfig, checkpointStore, rebalanceExecutor);
             return null;
         });
    // endregion
    // region Create request event processor
    EventProcessorGroupConfig requestReadersConfig = EventProcessorGroupConfigImpl.builder()
            .streamName(config.getRequestStreamName()).readerGroupName(config.getRequestReaderGroupName())
            .eventProcessorCount(1).checkpointConfig(CheckpointConfig.none())
            .build();
    EventProcessorConfig<ControllerEvent> requestConfig = EventProcessorConfig.builder()
            .config(requestReadersConfig).decider(ExceptionHandler.DEFAULT_EXCEPTION_HANDLER)
            .serializer(CONTROLLER_EVENT_SERIALIZER)
            .supplier(() -> new ConcurrentEventProcessor<>(streamRequestHandler, executor))
            .minRebalanceIntervalMillis(rebalanceIntervalMillis).build();
    log.debug("Creating stream request event processors");
    Retry.indefinitelyWithExpBackoff(DELAY, MULTIPLIER, MAX_DELAY,
            e -> log.warn("Error creating request event processor group", e))
         .run(() -> {
             requestEventProcessors = system.createEventProcessorGroup(requestConfig, checkpointStore, rebalanceExecutor);
             return null;
         });
    // endregion
    // region Create KVTable event processor
    EventProcessorGroupConfig kvtReadersConfig = EventProcessorGroupConfigImpl.builder()
            .streamName(config.getKvtStreamName()).readerGroupName(config.getKvtReaderGroupName())
            .eventProcessorCount(1).checkpointConfig(CheckpointConfig.none())
            .build();
    EventProcessorConfig<ControllerEvent> kvtRequestConfig = EventProcessorConfig.builder()
            .config(kvtReadersConfig).decider(ExceptionHandler.DEFAULT_EXCEPTION_HANDLER)
            .serializer(CONTROLLER_EVENT_SERIALIZER)
            .supplier(() -> new ConcurrentEventProcessor<>(kvtRequestHandler, executor))
            .minRebalanceIntervalMillis(rebalanceIntervalMillis).build();
    log.debug("Creating kvt request event processors");
    Retry.indefinitelyWithExpBackoff(DELAY, MULTIPLIER, MAX_DELAY,
            e -> log.warn("Error creating kvt request event processor group", e))
         .run(() -> {
             kvtRequestEventProcessors = system.createEventProcessorGroup(kvtRequestConfig, checkpointStore, rebalanceExecutor);
             return null;
         });
    // endregion
log.info("Awaiting start of event processors...");
commitEventProcessors.awaitRunning();
log.info("Commit event processor started.");
abortEventProcessors.awaitRunning();
log.info("Abort event processor started.");
requestEventProcessors.awaitRunning();
log.info("Stream request event processor started.");
kvtRequestEventProcessors.awaitRunning();
log.info("KVT request event processor started.");
}
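Each region above follows the same pattern: build an EventProcessorGroupConfig for the stream and reader group, wrap it in an EventProcessorConfig whose supplier hands the corresponding request handler to a ConcurrentEventProcessor, and then create the group under an indefinite exponential-back-off retry so transient failures do not abort controller startup. Below is a minimal, self-contained sketch of that retry step only, assuming Pravega's io.pravega.common.util.Retry utility is on the classpath and using the same four-argument overload seen above; the class name, constant names, and constant values are illustrative assumptions, not the controller's actual configuration.

import io.pravega.common.util.Retry;

public class EventProcessorGroupBootstrap {

    // Illustrative back-off settings; in ControllerEventProcessors these correspond to DELAY, MULTIPLIER, MAX_DELAY.
    private static final long RETRY_DELAY = 100;        // initial back-off in milliseconds (assumed value)
    private static final int RETRY_MULTIPLIER = 10;     // back-off multiplier (assumed value)
    private static final long RETRY_MAX_DELAY = 10000;  // back-off ceiling in milliseconds (assumed value)

    // Keeps retrying the group-creation action until it succeeds, reporting each failure.
    static void createGroupWithRetry(Runnable createGroup) {
        Retry.indefinitelyWithExpBackoff(RETRY_DELAY, RETRY_MULTIPLIER, RETRY_MAX_DELAY,
                e -> System.err.println("Error creating event processor group: " + e))
             .run(() -> {
                 createGroup.run();
                 return null;
             });
    }
}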
Use of io.pravega.controller.server.eventProcessor.requesthandlers.CommitRequestHandler in project pravega by pravega.
From class StreamTransactionMetadataTasksTest, method idempotentOperationsTests().
@Test(timeout = 10000)
public void idempotentOperationsTests() throws CheckpointStoreException, InterruptedException {
    // Create mock writer objects.
    EventStreamWriterMock<CommitEvent> commitWriter = new EventStreamWriterMock<>();
    EventStreamWriterMock<AbortEvent> abortWriter = new EventStreamWriterMock<>();
    EventStreamReader<CommitEvent> commitReader = commitWriter.getReader();
    EventStreamReader<AbortEvent> abortReader = abortWriter.getReader();
    // Create transaction tasks.
    txnTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelperMock, executor, "host",
            GrpcAuthHelper.getDisabledAuthHelper());
    txnTasks.initializeStreamWriters(commitWriter, abortWriter);
    consumer = new ControllerService(kvtStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks,
            txnTasks, segmentHelperMock, executor, null, requestTracker);
    final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration1 = StreamConfiguration.builder().scalingPolicy(policy1).build();
    // Create scope and stream.
    Assert.assertEquals(Controller.CreateScopeStatus.Status.SUCCESS,
            consumer.createScope(SCOPE, 0L).join().getStatus());
    Assert.assertEquals(Controller.CreateStreamStatus.Status.SUCCESS,
            streamMetadataTasks.createStream(SCOPE, STREAM, configuration1, System.currentTimeMillis(), 0L).join());
    // Create 2 transactions.
    final long lease = 5000;
    VersionedTransactionData txData1 = txnTasks.createTxn(SCOPE, STREAM, lease, 0L, 1024 * 1024L).join().getKey();
    VersionedTransactionData txData2 = txnTasks.createTxn(SCOPE, STREAM, lease, 0L, 1024 * 1024L).join().getKey();
    UUID tx1 = txData1.getId();
    UUID tx2 = txData2.getId();
    Version tx2Version = txData2.getVersion();
    // Commit the first one.
    Assert.assertEquals(TxnStatus.COMMITTING, txnTasks.commitTxn(SCOPE, STREAM, tx1, 0L).join());
    // Ensure that the transaction state is COMMITTING.
    assertEquals(TxnStatus.COMMITTING, streamStore.transactionStatus(SCOPE, STREAM, tx1, null, executor).join());
    // Abort the second one.
    Assert.assertEquals(TxnStatus.ABORTING, txnTasks.abortTxn(SCOPE, STREAM, tx2, tx2Version, 0L).join());
    // Ensure that the transaction state is ABORTING.
    assertEquals(TxnStatus.ABORTING, streamStore.transactionStatus(SCOPE, STREAM, tx2, null, executor).join());
    // Ensure that the commit (resp. abort) transaction tasks are idempotent
    // while the transaction is in COMMITTING (resp. ABORTING) state.
    assertEquals(TxnStatus.COMMITTING, txnTasks.commitTxn(SCOPE, STREAM, tx1, 0L).join());
    assertEquals(TxnStatus.ABORTING, txnTasks.abortTxn(SCOPE, STREAM, tx2, null, 0L).join());
    // Create commit and abort event processors.
    BlockingQueue<CommitEvent> processedCommitEvents = new LinkedBlockingQueue<>();
    BlockingQueue<AbortEvent> processedAbortEvents = new LinkedBlockingQueue<>();
    createEventProcessor("commitRG", "commitStream", commitReader, commitWriter,
            () -> new ConcurrentEventProcessor<>(new CommitRequestHandler(streamStore, streamMetadataTasks, txnTasks,
                    bucketStore, executor, processedCommitEvents), executor));
    createEventProcessor("abortRG", "abortStream", abortReader, abortWriter,
            () -> new ConcurrentEventProcessor<>(new AbortRequestHandler(streamStore, streamMetadataTasks, executor,
                    processedAbortEvents), executor));
    // Wait until the commit event is processed and ensure that the txn state is COMMITTED.
    CommitEvent commitEvent = processedCommitEvents.take();
    assertEquals(0, commitEvent.getEpoch());
    assertEquals(TxnStatus.COMMITTED, streamStore.transactionStatus(SCOPE, STREAM, tx1, null, executor).join());
    // Wait until the abort event is processed and ensure that the txn state is ABORTED.
    AbortEvent abortEvent = processedAbortEvents.take();
    assertEquals(tx2, abortEvent.getTxid());
    assertEquals(TxnStatus.ABORTED, streamStore.transactionStatus(SCOPE, STREAM, tx2, null, executor).join());
    // Ensure that the commit (resp. abort) transaction tasks remain idempotent
    // even after the transaction has been committed (resp. aborted).
    assertEquals(TxnStatus.COMMITTED, txnTasks.commitTxn(SCOPE, STREAM, tx1, 0L).join());
    assertEquals(TxnStatus.ABORTED, txnTasks.abortTxn(SCOPE, STREAM, tx2, null, 0L).join());
}
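The test synchronizes with the asynchronous event processors by passing a BlockingQueue into CommitRequestHandler and AbortRequestHandler and then blocking on take() until the corresponding event has been fully processed. The sketch below shows that synchronization pattern in isolation, independent of Pravega's classes; EventHandler, withCompletionQueue, and the event strings are hypothetical names introduced here for illustration.

import java.util.concurrent.BlockingQueue;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.TimeUnit;

public class ProcessedEventQueueSketch {

    // Hypothetical handler interface standing in for the request handlers used in the test.
    interface EventHandler<T> {
        void process(T event);
    }

    // Wraps a handler so every fully processed event is also published to a queue,
    // mirroring how the test hands processedCommitEvents/processedAbortEvents to the handlers.
    static <T> EventHandler<T> withCompletionQueue(EventHandler<T> delegate, BlockingQueue<T> processed) {
        return event -> {
            delegate.process(event);
            processed.offer(event); // signal waiting threads that processing has finished
        };
    }

    public static void main(String[] args) throws InterruptedException {
        BlockingQueue<String> processed = new LinkedBlockingQueue<>();
        EventHandler<String> handler =
                withCompletionQueue(event -> System.out.println("handled " + event), processed);

        handler.process("commit-txn-1");

        // A test thread would block here until the event has been processed (bounded by a timeout).
        String done = processed.poll(10, TimeUnit.SECONDS);
        System.out.println("processed: " + done);
    }
}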