
Example 6 with CommitRequestHandler

Use of io.pravega.controller.server.eventProcessor.requesthandlers.CommitRequestHandler in project pravega by pravega.

The class ControllerEventProcessorPravegaTablesStreamTest, method testTxnPartialCommitRetry.

@Test(timeout = 10000)
public void testTxnPartialCommitRetry() {
    PravegaTablesStoreHelper storeHelper = spy(new PravegaTablesStoreHelper(SegmentHelperMock.getSegmentHelperMockForTables(executor), GrpcAuthHelper.getDisabledAuthHelper(), executor));
    this.streamStore = new PravegaTablesStreamMetadataStore(PRAVEGA_ZK_CURATOR_RESOURCE.client, executor, Duration.ofHours(Config.COMPLETED_TRANSACTION_TTL_IN_HOURS), storeHelper);
    SegmentHelper segmentHelperMock = SegmentHelperMock.getSegmentHelperMock();
    EventHelper eventHelperMock = EventHelperMock.getEventHelperMock(executor, "1", ((AbstractStreamMetadataStore) this.streamStore).getHostTaskIndex());
    StreamMetadataTasks streamMetadataTasks = new StreamMetadataTasks(streamStore, this.bucketStore, TaskStoreFactory.createInMemoryStore(executor), segmentHelperMock, executor, "1", GrpcAuthHelper.getDisabledAuthHelper(), eventHelperMock);
    StreamTransactionMetadataTasks streamTransactionMetadataTasks = new StreamTransactionMetadataTasks(this.streamStore, segmentHelperMock, executor, "host", GrpcAuthHelper.getDisabledAuthHelper());
    streamTransactionMetadataTasks.initializeStreamWriters(new EventStreamWriterMock<>(), new EventStreamWriterMock<>());
    String scope = "scope";
    String stream = "stream";
    // region createStream
    final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration1 = StreamConfiguration.builder().scalingPolicy(policy1).build();
    streamStore.createScope(scope, null, executor).join();
    long start = System.currentTimeMillis();
    streamStore.createStream(scope, stream, configuration1, start, null, executor).join();
    streamStore.setState(scope, stream, State.ACTIVE, null, executor).join();
    StreamMetadataTasks spyStreamMetadataTasks = spy(streamMetadataTasks);
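    // Create transactions through the test helper and keep their versioned data; the commit event below targets the epoch of the first one.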
    List<VersionedTransactionData> txnDataList = createAndCommitTransactions(3);
    int epoch = txnDataList.get(0).getEpoch();
    spyStreamMetadataTasks.setRequestEventWriter(new EventStreamWriterMock<>());
    CommitRequestHandler commitEventProcessor = new CommitRequestHandler(streamStore, spyStreamMetadataTasks, streamTransactionMetadataTasks, bucketStore, executor);
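    // Simulate a partial commit: stub the store helper so that updating the committing-transactions record fails, but only for requests carrying failingClientRequestId.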
    final String committingTxnsRecordKey = "committingTxns";
    long failingClientRequestId = 123L;
    doReturn(failingClientRequestId).when(spyStreamMetadataTasks).getRequestId(any());
    OperationContext context = this.streamStore.createStreamContext(scope, stream, failingClientRequestId);
    streamStore.startCommitTransactions(scope, stream, 100, context, executor).join();
    doReturn(Futures.failedFuture(new RuntimeException())).when(storeHelper).updateEntry(anyString(), eq(committingTxnsRecordKey), any(), ArgumentMatchers.<Function<String, byte[]>>any(), any(), eq(failingClientRequestId));
    AssertExtensions.assertFutureThrows("Updating CommittingTxnRecord fails", commitEventProcessor.processEvent(new CommitEvent(scope, stream, epoch)), e -> Exceptions.unwrap(e) instanceof RuntimeException);
    verify(storeHelper, times(1)).removeEntries(anyString(), any(), eq(failingClientRequestId));
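    // The failed attempt already committed the individual transactions but did not reset the committing-transactions record, so the record is still non-empty.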
    VersionedMetadata<CommittingTransactionsRecord> versionedCommitRecord = this.streamStore.getVersionedCommittingTransactionsRecord(scope, stream, context, executor).join();
    CommittingTransactionsRecord commitRecord = versionedCommitRecord.getObject();
    assertFalse(CommittingTransactionsRecord.EMPTY.equals(commitRecord));
    for (VersionedTransactionData txnData : txnDataList) {
        checkTransactionState(scope, stream, txnData.getId(), TxnStatus.COMMITTED);
    }
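    // Retry with a different request id: the stubbed failure applies only to failingClientRequestId, so this attempt completes and resets the committing-transactions record.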
    long goodClientRequestId = 4567L;
    doReturn(goodClientRequestId).when(spyStreamMetadataTasks).getRequestId(any());
    commitEventProcessor.processEvent(new CommitEvent(scope, stream, epoch)).join();
    versionedCommitRecord = this.streamStore.getVersionedCommittingTransactionsRecord(scope, stream, context, executor).join();
    commitRecord = versionedCommitRecord.getObject();
    assertTrue(CommittingTransactionsRecord.EMPTY.equals(commitRecord));
    for (VersionedTransactionData txnData : txnDataList) {
        checkTransactionState(scope, stream, txnData.getId(), TxnStatus.COMMITTED);
    }
}
Also used : OperationContext(io.pravega.controller.store.stream.OperationContext) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) CommittingTransactionsRecord(io.pravega.controller.store.stream.records.CommittingTransactionsRecord) PravegaTablesStreamMetadataStore(io.pravega.controller.store.stream.PravegaTablesStreamMetadataStore) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) SegmentHelper(io.pravega.controller.server.SegmentHelper) VersionedTransactionData(io.pravega.controller.store.stream.VersionedTransactionData) CommitRequestHandler(io.pravega.controller.server.eventProcessor.requesthandlers.CommitRequestHandler) PravegaTablesStoreHelper(io.pravega.controller.store.PravegaTablesStoreHelper) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) EventHelper(io.pravega.controller.task.EventHelper) StreamTransactionMetadataTasks(io.pravega.controller.task.Stream.StreamTransactionMetadataTasks) CommitEvent(io.pravega.shared.controller.event.CommitEvent) StreamMetadataTasks(io.pravega.controller.task.Stream.StreamMetadataTasks) Test(org.junit.Test)

Example 7 with CommitRequestHandler

Use of io.pravega.controller.server.eventProcessor.requesthandlers.CommitRequestHandler in project pravega by pravega.

The class ScaleRequestHandlerTest, method testMigrateManualScaleRequestAfterRollingTxn.

@Test(timeout = 30000)
public void testMigrateManualScaleRequestAfterRollingTxn() throws Exception {
    // This test checks a scenario where, after a rolling txn, an outstanding scale request's
    // epoch consistency check fails, so the request has to be migrated to the new epoch before it is processed
    String stream = "newStream";
    StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 2)).build();
    streamMetadataTasks.createStream(scope, stream, config, System.currentTimeMillis(), 0L).get();
    EventWriterMock writer = new EventWriterMock();
    streamMetadataTasks.setRequestEventWriter(writer);
    ScaleOperationTask scaleRequestHandler = new ScaleOperationTask(streamMetadataTasks, streamStore, executor);
    StreamRequestHandler requestHandler = new StreamRequestHandler(null, scaleRequestHandler, null, null, null, null, null, null, null, streamStore, null, executor);
    CommitRequestHandler commitRequestHandler = new CommitRequestHandler(streamStore, streamMetadataTasks, streamTransactionMetadataTasks, bucketStore, executor);
    // 1 create transaction on old epoch and set it to committing
    UUID txnIdOldEpoch = streamStore.generateTransactionId(scope, stream, null, executor).join();
    VersionedTransactionData txnData = streamStore.createTransaction(scope, stream, txnIdOldEpoch, 10000, 10000, null, executor).join();
    streamStore.sealTransaction(scope, stream, txnData.getId(), true, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();
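    // Create a second transaction on the old epoch and move it to committing as well.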
    UUID txnIdOldEpoch2 = streamStore.generateTransactionId(scope, stream, null, executor).join();
    VersionedTransactionData txnData2 = streamStore.createTransaction(scope, stream, txnIdOldEpoch2, 10000, 10000, null, executor).join();
    streamStore.sealTransaction(scope, stream, txnData2.getId(), true, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();
    EpochRecord epochZero = streamStore.getActiveEpoch(scope, stream, null, true, executor).join();
    assertEquals(0, epochZero.getEpoch());
    // 2. start scale
    requestHandler.process(new ScaleOpEvent(scope, stream, Lists.newArrayList(0L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 0.25), new AbstractMap.SimpleEntry<>(0.25, 0.5)), false, System.currentTimeMillis(), System.currentTimeMillis()), () -> false).join();
    // 3. verify that scale is complete
    State state = streamStore.getState(scope, stream, true, null, executor).join();
    assertEquals(State.ACTIVE, state);
    // 4. Just submit a new scale without letting it run. This should create an epoch transition; state should remain ACTIVE.
    streamStore.submitScale(scope, stream, Lists.newArrayList(1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.5, 0.75), new AbstractMap.SimpleEntry<>(0.75, 1.0)), System.currentTimeMillis(), null, null, executor).join();
    // 5. commit on old epoch. this should roll over.
    assertTrue(Futures.await(commitRequestHandler.processEvent(new CommitEvent(scope, stream, txnData.getEpoch()))));
    TxnStatus txnStatus = streamStore.transactionStatus(scope, stream, txnIdOldEpoch, null, executor).join();
    assertEquals(TxnStatus.COMMITTED, txnStatus);
    // 6. run scale against old record but with manual scale flag set to true. This should be migrated to new epoch and processed.
    requestHandler.process(new ScaleOpEvent(scope, stream, Lists.newArrayList(1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.5, 0.75), new AbstractMap.SimpleEntry<>(0.75, 1.0)), true, System.currentTimeMillis(), System.currentTimeMillis()), () -> false).join();
    state = streamStore.getState(scope, stream, true, null, executor).join();
    assertEquals(State.ACTIVE, state);
    EpochRecord epoch = streamStore.getActiveEpoch(scope, stream, null, true, executor).join();
    assertEquals(4, epoch.getEpoch());
}
Also used : EpochRecord(io.pravega.controller.store.stream.records.EpochRecord) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) ScaleOperationTask(io.pravega.controller.server.eventProcessor.requesthandlers.ScaleOperationTask) VersionedTransactionData(io.pravega.controller.store.stream.VersionedTransactionData) CommitRequestHandler(io.pravega.controller.server.eventProcessor.requesthandlers.CommitRequestHandler) ScaleOpEvent(io.pravega.shared.controller.event.ScaleOpEvent) AbstractMap(java.util.AbstractMap) StreamRequestHandler(io.pravega.controller.server.eventProcessor.requesthandlers.StreamRequestHandler) State(io.pravega.controller.store.stream.State) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) CommitEvent(io.pravega.shared.controller.event.CommitEvent) UUID(java.util.UUID) TxnStatus(io.pravega.controller.store.stream.TxnStatus) Test(org.junit.Test)

Example 8 with CommitRequestHandler

Use of io.pravega.controller.server.eventProcessor.requesthandlers.CommitRequestHandler in project pravega by pravega.

The class ScaleRequestHandlerTest, method testScaleWithTransactionRequest.

@Test(timeout = 30000)
public void testScaleWithTransactionRequest() throws InterruptedException {
    EventWriterMock writer = new EventWriterMock();
    streamMetadataTasks.setRequestEventWriter(writer);
    ScaleOperationTask scaleRequestHandler = new ScaleOperationTask(streamMetadataTasks, streamStore, executor);
    StreamRequestHandler requestHandler = new StreamRequestHandler(null, scaleRequestHandler, null, null, null, null, null, null, null, streamStore, null, executor);
    CommitRequestHandler commitRequestHandler = new CommitRequestHandler(streamStore, streamMetadataTasks, streamTransactionMetadataTasks, bucketStore, executor);
    // 1 create transaction on old epoch and set it to committing
    UUID txnIdOldEpoch = streamStore.generateTransactionId(scope, stream, null, executor).join();
    VersionedTransactionData txnData = streamStore.createTransaction(scope, stream, txnIdOldEpoch, 10000, 10000, null, executor).join();
    streamStore.sealTransaction(scope, stream, txnData.getId(), true, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();
    EpochRecord epochZero = streamStore.getActiveEpoch(scope, stream, null, true, executor).join();
    assertEquals(0, epochZero.getEpoch());
    // 2. start scale
    requestHandler.process(new ScaleOpEvent(scope, stream, Lists.newArrayList(0L, 1L, 2L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)), false, System.currentTimeMillis(), System.currentTimeMillis()), () -> false).join();
    // 3. verify that scale is complete
    State state = streamStore.getState(scope, stream, true, null, executor).join();
    assertEquals(State.ACTIVE, state);
    EpochRecord epochOne = streamStore.getActiveEpoch(scope, stream, null, true, executor).join();
    assertEquals(1, epochOne.getEpoch());
    // 4. create transaction -> verify that this is created on new epoch
    UUID txnIdNewEpoch = streamStore.generateTransactionId(scope, stream, null, executor).join();
    VersionedTransactionData txnDataNew = streamStore.createTransaction(scope, stream, txnIdNewEpoch, 10000, 10000, null, executor).join();
    streamStore.sealTransaction(scope, stream, txnDataNew.getId(), true, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();
    // 5. commit on old epoch. this should roll over
    assertTrue(Futures.await(commitRequestHandler.processEvent(new CommitEvent(scope, stream, txnData.getEpoch()))));
    TxnStatus txnStatus = streamStore.transactionStatus(scope, stream, txnIdOldEpoch, null, executor).join();
    assertEquals(TxnStatus.COMMITTED, txnStatus);
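    // Rolling txn creates duplicate epochs: epoch 2 references epoch 0 and epoch 3 references epoch 1, each carrying the same segments as the epoch it duplicates.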
    EpochRecord epochTwo = streamStore.getEpoch(scope, stream, 2, null, executor).join();
    EpochRecord epochThree = streamStore.getEpoch(scope, stream, 3, null, executor).join();
    assertEquals(0, epochTwo.getReferenceEpoch());
    assertEquals(epochZero.getSegments().size(), epochTwo.getSegments().size());
    assertEquals(epochZero.getSegments().stream().map(x -> NameUtils.getSegmentNumber(x.segmentId())).collect(Collectors.toSet()), epochTwo.getSegments().stream().map(x -> NameUtils.getSegmentNumber(x.segmentId())).collect(Collectors.toSet()));
    assertEquals(1, epochThree.getReferenceEpoch());
    assertEquals(epochOne.getSegments().size(), epochThree.getSegments().size());
    assertEquals(epochOne.getSegments().stream().map(x -> NameUtils.getSegmentNumber(x.segmentId())).collect(Collectors.toSet()), epochThree.getSegments().stream().map(x -> NameUtils.getSegmentNumber(x.segmentId())).collect(Collectors.toSet()));
    EpochRecord activeEpoch = streamStore.getActiveEpoch(scope, stream, null, true, executor).join();
    assertEquals(epochThree, activeEpoch);
    // 6. commit on new epoch. This should happen on duplicate of new epoch successfully
    assertTrue(Futures.await(commitRequestHandler.processEvent(new CommitEvent(scope, stream, txnDataNew.getEpoch()))));
    txnStatus = streamStore.transactionStatus(scope, stream, txnIdNewEpoch, null, executor).join();
    assertEquals(TxnStatus.COMMITTED, txnStatus);
    activeEpoch = streamStore.getActiveEpoch(scope, stream, null, true, executor).join();
    assertEquals(epochThree, activeEpoch);
}
Also used : StreamRequestHandler(io.pravega.controller.server.eventProcessor.requesthandlers.StreamRequestHandler) EpochRecord(io.pravega.controller.store.stream.records.EpochRecord) State(io.pravega.controller.store.stream.State) CommitEvent(io.pravega.shared.controller.event.CommitEvent) ScaleOperationTask(io.pravega.controller.server.eventProcessor.requesthandlers.ScaleOperationTask) UUID(java.util.UUID) VersionedTransactionData(io.pravega.controller.store.stream.VersionedTransactionData) TxnStatus(io.pravega.controller.store.stream.TxnStatus) CommitRequestHandler(io.pravega.controller.server.eventProcessor.requesthandlers.CommitRequestHandler) ScaleOpEvent(io.pravega.shared.controller.event.ScaleOpEvent) Test(org.junit.Test)

Example 9 with CommitRequestHandler

Use of io.pravega.controller.server.eventProcessor.requesthandlers.CommitRequestHandler in project pravega by pravega.

The class StreamTransactionMetadataTasksTest, method failOverTests.

@Test(timeout = 60000)
public void failOverTests() throws Exception {
    // Create mock writer objects.
    EventStreamWriterMock<CommitEvent> commitWriter = new EventStreamWriterMock<>();
    EventStreamWriterMock<AbortEvent> abortWriter = new EventStreamWriterMock<>();
    EventStreamReader<CommitEvent> commitReader = commitWriter.getReader();
    EventStreamReader<AbortEvent> abortReader = abortWriter.getReader();
    txnTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelperMock, executor, "host", GrpcAuthHelper.getDisabledAuthHelper());
    txnTasks.initializeStreamWriters(commitWriter, abortWriter);
    consumer = new ControllerService(kvtStore, kvtMetadataTasks, streamStore, bucketStore, streamMetadataTasks, txnTasks, segmentHelperMock, executor, null, requestTracker);
    // Create test scope and stream.
    final ScalingPolicy policy1 = ScalingPolicy.fixed(2);
    final StreamConfiguration configuration1 = StreamConfiguration.builder().scalingPolicy(policy1).build();
    Assert.assertEquals(Controller.CreateScopeStatus.Status.SUCCESS, consumer.createScope(SCOPE, 0L).join().getStatus());
    Assert.assertEquals(Controller.CreateStreamStatus.Status.SUCCESS, streamMetadataTasks.createStream(SCOPE, STREAM, configuration1, System.currentTimeMillis(), 0L).join());
    // Set up txn task for creating transactions from a failedHost.
    @Cleanup StreamTransactionMetadataTasks failedTxnTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelperMock, executor, "failedHost", GrpcAuthHelper.getDisabledAuthHelper());
    failedTxnTasks.initializeStreamWriters(new EventStreamWriterMock<>(), new EventStreamWriterMock<>());
    // Create 3 transactions from failedHost.
    VersionedTransactionData tx1 = failedTxnTasks.createTxn(SCOPE, STREAM, 10000, 0L, 0L).join().getKey();
    VersionedTransactionData tx2 = failedTxnTasks.createTxn(SCOPE, STREAM, 10000, 0L, 0L).join().getKey();
    VersionedTransactionData tx3 = failedTxnTasks.createTxn(SCOPE, STREAM, 10000, 0L, 0L).join().getKey();
    VersionedTransactionData tx4 = failedTxnTasks.createTxn(SCOPE, STREAM, 10000, 0L, 0L).join().getKey();
    // Ping another txn from failedHost.
    PingTxnStatus pingStatus = failedTxnTasks.pingTxn(SCOPE, STREAM, tx4.getId(), 10000, 0L).join();
    VersionedTransactionData tx4get = streamStore.getTransactionData(SCOPE, STREAM, tx4.getId(), null, executor).join();
    // Validate versions of all txn
    Assert.assertEquals(0, tx1.getVersion().asIntVersion().getIntValue());
    Assert.assertEquals(0, tx2.getVersion().asIntVersion().getIntValue());
    Assert.assertEquals(0, tx3.getVersion().asIntVersion().getIntValue());
    Assert.assertEquals(1, tx4get.getVersion().asIntVersion().getIntValue());
    Assert.assertEquals(PingTxnStatus.Status.OK, pingStatus.getStatus());
    // Validate the txn index.
    Assert.assertEquals(1, streamStore.listHostsOwningTxn().join().size());
    // Change state of one txn to COMMITTING.
    TxnStatus txnStatus2 = streamStore.sealTransaction(SCOPE, STREAM, tx2.getId(), true, Optional.empty(), "", Long.MIN_VALUE, null, executor).thenApply(AbstractMap.SimpleEntry::getKey).join();
    Assert.assertEquals(TxnStatus.COMMITTING, txnStatus2);
    // Change state of another txn to ABORTING.
    TxnStatus txnStatus3 = streamStore.sealTransaction(SCOPE, STREAM, tx3.getId(), false, Optional.empty(), "", Long.MIN_VALUE, null, executor).thenApply(AbstractMap.SimpleEntry::getKey).join();
    Assert.assertEquals(TxnStatus.ABORTING, txnStatus3);
    // Create transaction tasks for sweeping txns from failedHost.
    txnTasks = new StreamTransactionMetadataTasks(streamStore, segmentHelperMock, executor, "host", GrpcAuthHelper.getDisabledAuthHelper());
    TxnSweeper txnSweeper = new TxnSweeper(streamStore, txnTasks, 100, executor);
    // Before initialization, txnSweeper.sweepFailedProcesses should fail with an IllegalStateException.
    AssertExtensions.assertFutureThrows("IllegalStateException before initialization", txnSweeper.sweepFailedProcesses(() -> Collections.singleton("host")), ex -> ex instanceof IllegalStateException);
    // Initialize stream writers.
    txnTasks.initializeStreamWriters(commitWriter, abortWriter);
    // Validate that txnTasks is ready.
    assertTrue(txnTasks.isReady());
    // Sweep txns that were being managed by failedHost.
    txnSweeper.sweepFailedProcesses(() -> Collections.singleton("host")).join();
    // Validate that sweeping completes correctly.
    Set<String> listOfHosts = streamStore.listHostsOwningTxn().join();
    Assert.assertEquals(1, listOfHosts.size());
    Assert.assertTrue(listOfHosts.contains("host"));
    Assert.assertEquals(TxnStatus.OPEN, streamStore.transactionStatus(SCOPE, STREAM, tx1.getId(), null, executor).join());
    Assert.assertEquals(TxnStatus.COMMITTING, streamStore.transactionStatus(SCOPE, STREAM, tx2.getId(), null, executor).join());
    Assert.assertEquals(TxnStatus.ABORTING, streamStore.transactionStatus(SCOPE, STREAM, tx3.getId(), null, executor).join());
    Assert.assertEquals(TxnStatus.OPEN, streamStore.transactionStatus(SCOPE, STREAM, tx4.getId(), null, executor).join());
    VersionedTransactionData txnData = streamStore.getTransactionData(SCOPE, STREAM, tx1.getId(), null, executor).join();
    Assert.assertEquals(1, txnData.getVersion().asIntVersion().getIntValue());
    txnData = streamStore.getTransactionData(SCOPE, STREAM, tx4.getId(), null, executor).join();
    Assert.assertEquals(2, txnData.getVersion().asIntVersion().getIntValue());
    // Create commit and abort event processors.
    BlockingQueue<CommitEvent> processedCommitEvents = new LinkedBlockingQueue<>();
    BlockingQueue<AbortEvent> processedAbortEvents = new LinkedBlockingQueue<>();
    createEventProcessor("commitRG", "commitStream", commitReader, commitWriter, () -> new ConcurrentEventProcessor<>(new CommitRequestHandler(streamStore, streamMetadataTasks, txnTasks, bucketStore, executor, processedCommitEvents), executor));
    createEventProcessor("abortRG", "abortStream", abortReader, abortWriter, () -> new ConcurrentEventProcessor<>(new AbortRequestHandler(streamStore, streamMetadataTasks, executor, processedAbortEvents), executor));
    // Wait until the commit event is processed and ensure that the txn state is COMMITTED.
    CommitEvent commitEvent = processedCommitEvents.take();
    assertEquals(tx2.getEpoch(), commitEvent.getEpoch());
    assertEquals(TxnStatus.COMMITTED, streamStore.transactionStatus(SCOPE, STREAM, tx2.getId(), null, executor).join());
    // Wait until 3 abort events are processed and ensure that the txn state is ABORTED.
    Predicate<AbortEvent> predicate = event -> event.getTxid().equals(tx1.getId()) || event.getTxid().equals(tx3.getId()) || event.getTxid().equals(tx4.getId());
    AbortEvent abortEvent1 = processedAbortEvents.take();
    assertTrue(predicate.test(abortEvent1));
    AbortEvent abortEvent2 = processedAbortEvents.take();
    assertTrue(predicate.test(abortEvent2));
    AbortEvent abortEvent3 = processedAbortEvents.take();
    assertTrue(predicate.test(abortEvent3));
    assertEquals(TxnStatus.ABORTED, streamStore.transactionStatus(SCOPE, STREAM, tx1.getId(), null, executor).join());
    assertEquals(TxnStatus.ABORTED, streamStore.transactionStatus(SCOPE, STREAM, tx3.getId(), null, executor).join());
    assertEquals(TxnStatus.ABORTED, streamStore.transactionStatus(SCOPE, STREAM, tx4.getId(), null, executor).join());
}
Also used : EventStreamWriter(io.pravega.client.stream.EventStreamWriter) ArgumentMatchers.eq(org.mockito.ArgumentMatchers.eq) Cleanup(lombok.Cleanup) StreamConfiguration(io.pravega.client.stream.StreamConfiguration) CheckpointConfig(io.pravega.controller.eventProcessor.CheckpointConfig) StoreException(io.pravega.controller.store.stream.StoreException) Pair(org.apache.commons.lang3.tuple.Pair) TaskMetadataStore(io.pravega.controller.store.task.TaskMetadataStore) Duration(java.time.Duration) Mockito.doAnswer(org.mockito.Mockito.doAnswer) CancellationException(java.util.concurrent.CancellationException) Set(java.util.Set) BlockingQueue(java.util.concurrent.BlockingQueue) RequestTracker(io.pravega.common.tracing.RequestTracker) ControllerEvent(io.pravega.shared.controller.event.ControllerEvent) KVTableMetadataStore(io.pravega.controller.store.kvtable.KVTableMetadataStore) Slf4j(lombok.extern.slf4j.Slf4j) EventProcessorSystemImpl(io.pravega.controller.eventProcessor.impl.EventProcessorSystemImpl) Assert.assertFalse(org.junit.Assert.assertFalse) StreamMetadataStore(io.pravega.controller.store.stream.StreamMetadataStore) EventStreamWriterMock(io.pravega.controller.mocks.EventStreamWriterMock) Futures(io.pravega.common.concurrent.Futures) GrpcAuthHelper(io.pravega.controller.server.security.auth.GrpcAuthHelper) Mockito.mock(org.mockito.Mockito.mock) CuratorFrameworkFactory(org.apache.curator.framework.CuratorFrameworkFactory) StreamMetrics(io.pravega.controller.metrics.StreamMetrics) StreamStoreFactory(io.pravega.controller.store.stream.StreamStoreFactory) TransactionMetrics(io.pravega.controller.metrics.TransactionMetrics) ConnectionFactory(io.pravega.client.connection.impl.ConnectionFactory) Mock(org.mockito.Mock) Exceptions(io.pravega.common.Exceptions) Mockito.spy(org.mockito.Mockito.spy) Supplier(java.util.function.Supplier) ArrayList(java.util.ArrayList) Answer(org.mockito.stubbing.Answer) InvocationOnMock(org.mockito.invocation.InvocationOnMock) TestingServerStarter(io.pravega.test.common.TestingServerStarter) ScheduledExecutorService(java.util.concurrent.ScheduledExecutorService) TestingServer(org.apache.curator.test.TestingServer) Before(org.junit.Before) SegmentHelperMock(io.pravega.controller.mocks.SegmentHelperMock) lombok.val(lombok.val) Assert.assertTrue(org.junit.Assert.assertTrue) EventStreamReader(io.pravega.client.stream.EventStreamReader) Test(org.junit.Test) Mockito.times(org.mockito.Mockito.times) TableMetadataTasks(io.pravega.controller.task.KeyValueTable.TableMetadataTasks) TaskStoreFactory(io.pravega.controller.store.task.TaskStoreFactory) Version(io.pravega.controller.store.Version) HostControllerStore(io.pravega.controller.store.host.HostControllerStore) CommitRequestHandler(io.pravega.controller.server.eventProcessor.requesthandlers.CommitRequestHandler) Assert(org.junit.Assert) ScalingPolicy(io.pravega.client.stream.ScalingPolicy) Assert.assertEquals(org.junit.Assert.assertEquals) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) CommitEvent(io.pravega.shared.controller.event.CommitEvent) StreamSegmentRecord(io.pravega.controller.store.stream.records.StreamSegmentRecord) SneakyThrows(lombok.SneakyThrows) AssertExtensions(io.pravega.test.common.AssertExtensions) ReaderGroup(io.pravega.client.stream.ReaderGroup) CheckpointStoreException(io.pravega.controller.store.checkpoint.CheckpointStoreException) AtomicInteger(java.util.concurrent.atomic.AtomicInteger) ReaderGroupManager(io.pravega.client.admin.ReaderGroupManager) 
AbortRequestHandler(io.pravega.controller.server.eventProcessor.requesthandlers.AbortRequestHandler) After(org.junit.After) Controller(io.pravega.controller.stream.api.grpc.v1.Controller) ExceptionHandler(io.pravega.controller.eventProcessor.ExceptionHandler) EventProcessorConfig(io.pravega.controller.eventProcessor.EventProcessorConfig) Predicate(java.util.function.Predicate) UUID(java.util.UUID) LinkedBlockingQueue(java.util.concurrent.LinkedBlockingQueue) List(java.util.List) CuratorFramework(org.apache.curator.framework.CuratorFramework) ConcurrentEventProcessor(io.pravega.controller.eventProcessor.impl.ConcurrentEventProcessor) Config(io.pravega.controller.util.Config) TxnStatus(io.pravega.controller.store.stream.TxnStatus) VersionedTransactionData(io.pravega.controller.store.stream.VersionedTransactionData) Optional(java.util.Optional) PingTxnStatus(io.pravega.controller.stream.api.grpc.v1.Controller.PingTxnStatus) NotImplementedException(org.apache.commons.lang3.NotImplementedException) ArgumentMatchers.any(org.mockito.ArgumentMatchers.any) ArgumentMatchers.anyLong(org.mockito.ArgumentMatchers.anyLong) SegmentHelper(io.pravega.controller.server.SegmentHelper) EventProcessor(io.pravega.controller.eventProcessor.impl.EventProcessor) CheckpointStoreFactory(io.pravega.controller.store.checkpoint.CheckpointStoreFactory) EventProcessorGroupConfigImpl(io.pravega.controller.eventProcessor.impl.EventProcessorGroupConfigImpl) CompletableFuture(java.util.concurrent.CompletableFuture) BucketStore(io.pravega.controller.store.stream.BucketStore) AbortEvent(io.pravega.shared.controller.event.AbortEvent) EventSerializer(io.pravega.controller.eventProcessor.EventSerializer) ExponentialBackoffRetry(org.apache.curator.retry.ExponentialBackoffRetry) EventStreamClientFactory(io.pravega.client.EventStreamClientFactory) HostMonitorConfigImpl(io.pravega.controller.store.host.impl.HostMonitorConfigImpl) EventWriterConfig(io.pravega.client.stream.EventWriterConfig) ControllerService(io.pravega.controller.server.ControllerService) Iterator(java.util.Iterator) ControllerEventProcessorConfig(io.pravega.controller.server.eventProcessor.ControllerEventProcessorConfig) HostStoreFactory(io.pravega.controller.store.host.HostStoreFactory) ImmutablePair(org.apache.commons.lang3.tuple.ImmutablePair) Mockito.verify(org.mockito.Mockito.verify) EventProcessorGroupConfig(io.pravega.controller.eventProcessor.EventProcessorGroupConfig) Mockito(org.mockito.Mockito) AbstractMap(java.util.AbstractMap) State(io.pravega.controller.store.stream.State) ExecutorServiceHelpers(io.pravega.common.concurrent.ExecutorServiceHelpers) Collections(java.util.Collections) Test(org.junit.Test)

Example 10 with CommitRequestHandler

Use of io.pravega.controller.server.eventProcessor.requesthandlers.CommitRequestHandler in project pravega by pravega.

The class RequestHandlersTest, method testCommitTxnIgnoreFairness.

@Test
public void testCommitTxnIgnoreFairness() {
    CommitRequestHandler requestHandler = new CommitRequestHandler(streamStore, streamMetadataTasks, streamTransactionMetadataTasks, bucketStore, executor);
    String fairness = "fairness";
    streamStore.createScope(fairness, null, executor).join();
    streamMetadataTasks.createStream(fairness, fairness, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build(), System.currentTimeMillis(), 0L).join();
    UUID txn = streamTransactionMetadataTasks.createTxn(fairness, fairness, 30000, 0L, 1024 * 1024L).join().getKey().getId();
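    // Seal the transaction with commit = true so it moves to COMMITTING before the handler processes the commit event.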
    streamStore.sealTransaction(fairness, fairness, txn, true, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();
    // 1. set segment helper mock to throw exception
    doAnswer(x -> Futures.failedFuture(new RuntimeException())).when(segmentHelper).mergeTxnSegments(anyString(), anyString(), anyLong(), anyLong(), any(), anyString(), anyLong());
    streamStore.startCommitTransactions(fairness, fairness, 100, null, executor).join();
    // 2. start process --> this should fail with a retryable exception while talking to segment store!
    streamStore.setState(fairness, fairness, State.COMMITTING_TXN, null, executor).join();
    assertEquals(State.COMMITTING_TXN, streamStore.getState(fairness, fairness, true, null, executor).join());
    CommitEvent event = new CommitEvent(fairness, fairness, 0);
    AssertExtensions.assertFutureThrows("", requestHandler.process(event, () -> false), e -> Exceptions.unwrap(e) instanceof RuntimeException);
    verify(segmentHelper, atLeastOnce()).mergeTxnSegments(anyString(), anyString(), anyLong(), anyLong(), any(), anyString(), anyLong());
    // 3. set waiting processor to "random name"
    streamStore.createWaitingRequestIfAbsent(fairness, fairness, "myProcessor", null, executor).join();
    // 4. reset segment helper to return success
    doAnswer(x -> CompletableFuture.completedFuture(0L)).when(segmentHelper).mergeTxnSegments(anyString(), anyString(), anyLong(), anyLong(), any(), anyString(), anyLong());
    // 5. process again. it should succeed while ignoring waiting processor
    requestHandler.process(event, () -> false).join();
    assertEquals(State.ACTIVE, streamStore.getState(fairness, fairness, true, null, executor).join());
    // 6. run a new update. it should fail because of waiting processor.
    CommitEvent event2 = new CommitEvent(fairness, fairness, 0);
    AssertExtensions.assertFutureThrows("", requestHandler.process(event2, () -> false), e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    streamStore.deleteWaitingRequestConditionally(fairness, fairness, "myProcessor", null, executor).join();
}
Also used : CommitEvent(io.pravega.shared.controller.event.CommitEvent) ArgumentMatchers.anyString(org.mockito.ArgumentMatchers.anyString) UUID(java.util.UUID) CommitRequestHandler(io.pravega.controller.server.eventProcessor.requesthandlers.CommitRequestHandler) StoreException(io.pravega.controller.store.stream.StoreException) Test(org.junit.Test)
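
Common to all of the examples above is the same driving pattern: construct a CommitRequestHandler over the stream metadata store, the stream metadata and transaction task classes, the bucket store, and an executor, then feed it CommitEvent instances for the epoch whose transactions should be committed. A minimal sketch of that pattern, assuming the store, task, and executor fields are already initialized as in the test setups above:

CommitRequestHandler handler = new CommitRequestHandler(streamStore, streamMetadataTasks,
        streamTransactionMetadataTasks, bucketStore, executor);
// Process a commit event for all committing transactions on the given epoch.
handler.processEvent(new CommitEvent(scope, stream, epoch)).join();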

Aggregations

CommitRequestHandler (io.pravega.controller.server.eventProcessor.requesthandlers.CommitRequestHandler)17 CommitEvent (io.pravega.shared.controller.event.CommitEvent)17 VersionedTransactionData (io.pravega.controller.store.stream.VersionedTransactionData)14 Test (org.junit.Test)14 UUID (java.util.UUID)12 StreamConfiguration (io.pravega.client.stream.StreamConfiguration)9 ScaleOperationTask (io.pravega.controller.server.eventProcessor.requesthandlers.ScaleOperationTask)7 ScaleOpEvent (io.pravega.shared.controller.event.ScaleOpEvent)6 ArgumentMatchers.anyString (org.mockito.ArgumentMatchers.anyString)6 StreamRequestHandler (io.pravega.controller.server.eventProcessor.requesthandlers.StreamRequestHandler)5 StoreException (io.pravega.controller.store.stream.StoreException)4 StreamMetadataStore (io.pravega.controller.store.stream.StreamMetadataStore)4 EpochRecord (io.pravega.controller.store.stream.records.EpochRecord)4 AbstractMap (java.util.AbstractMap)4 CompletableFuture (java.util.concurrent.CompletableFuture)4 ScalingPolicy (io.pravega.client.stream.ScalingPolicy)3 State (io.pravega.controller.store.stream.State)3 TxnStatus (io.pravega.controller.store.stream.TxnStatus)3 Exceptions (io.pravega.common.Exceptions)2 ExecutorServiceHelpers (io.pravega.common.concurrent.ExecutorServiceHelpers)2