Use of io.pravega.shared.controller.event.ScaleOpEvent in the project pravega by pravega: class StreamMetadataTasksTest, method manualScaleTest.
/**
 * Verifies the manual-scale workflow: posting a manual scale request must leave the
 * stream ACTIVE until the scale event is actually processed, starting a scale before
 * the state moves to SCALING must be rejected, and processing the event must produce
 * exactly the two requested successor segments.
 */
@Test(timeout = 30000)
public void manualScaleTest() throws Exception {
    final ScalingPolicy fixedPolicy = ScalingPolicy.fixed(1);
    final StreamConfiguration streamConfig = StreamConfiguration.builder().scalingPolicy(fixedPolicy).build();
    streamStorePartialMock.createStream(SCOPE, "test", streamConfig, System.currentTimeMillis(), null, executor).get();
    streamStorePartialMock.setState(SCOPE, "test", State.ACTIVE, null, executor).get();
    WriterMock eventWriter = new WriterMock(streamMetadataTasks, executor);
    streamMetadataTasks.setRequestEventWriter(eventWriter);
    // Split segment 0 into two equal halves.
    List<Map.Entry<Double, Double>> splitRanges = new ArrayList<>();
    splitRanges.add(new AbstractMap.SimpleEntry<>(0.0, 0.5));
    splitRanges.add(new AbstractMap.SimpleEntry<>(0.5, 1.0));
    ScaleResponse scaleResult = streamMetadataTasks.manualScale(SCOPE, "test", Collections.singletonList(0L), splitRanges, 30, 0L).get();
    assertEquals(ScaleStreamStatus.STARTED, scaleResult.getStatus());
    OperationContext ctx = streamStorePartialMock.createStreamContext(SCOPE, "test", 0L);
    // Manual scale only posts an event; the stream must still be ACTIVE.
    assertEquals(streamStorePartialMock.getState(SCOPE, "test", false, ctx, executor).get(), State.ACTIVE);
    // Submitting the scale records an epoch transition without changing the state.
    VersionedMetadata<EpochTransitionRecord> epochTransition = streamStorePartialMock.submitScale(SCOPE, "test", Collections.singletonList(0L), new LinkedList<>(splitRanges), 30, null, null, executor).get();
    assertEquals(epochTransition.getObject().getActiveEpoch(), 0);
    VersionedMetadata<State> stateRecord = streamStorePartialMock.getVersionedState(SCOPE, "test", ctx, executor).get();
    assertEquals(stateRecord.getObject(), State.ACTIVE);
    // startScale is only legal once the stream has been moved to SCALING; calling it
    // while the state record still says ACTIVE must fail with IllegalArgumentException.
    AssertExtensions.assertThrows("", () -> streamStorePartialMock.startScale(SCOPE, "test", true, epochTransition, stateRecord, ctx, executor).get(), ex -> Exceptions.unwrap(ex) instanceof IllegalArgumentException);
    // Drain the posted event from the mock writer and run the scale to completion.
    ScaleOperationTask scaleTask = new ScaleOperationTask(streamMetadataTasks, streamStorePartialMock, executor);
    scaleTask.runScale((ScaleOpEvent) eventWriter.getEventQueue().take(), true, ctx).get();
    // The epoch transition must contain exactly the two requested successor ranges.
    Map<Long, Map.Entry<Double, Double>> newSegments = epochTransition.getObject().getNewSegmentsWithRange();
    assertTrue(newSegments.entrySet().stream().anyMatch(x -> x.getKey() == computeSegmentId(1, 1) && AssertExtensions.nearlyEquals(x.getValue().getKey(), 0.0, 0) && AssertExtensions.nearlyEquals(x.getValue().getValue(), 0.5, 0)));
    assertTrue(newSegments.entrySet().stream().anyMatch(x -> x.getKey() == computeSegmentId(2, 1) && AssertExtensions.nearlyEquals(x.getValue().getKey(), 0.5, 0) && AssertExtensions.nearlyEquals(x.getValue().getValue(), 1.0, 0)));
}
Use of io.pravega.shared.controller.event.ScaleOpEvent in the project pravega by pravega: class RequestHandlersTest, method concurrentRollingTxnCommit.
/**
 * Runs two commit-request handlers concurrently against the same stream where the
 * committing transaction lives on an old epoch, so the commit requires a rolling
 * transaction. The first handler is built on a spied store whose method named by
 * {@code func} is stalled via a signal/wait latch pair (see
 * {@code setMockCommitTxnLatch}), letting the second handler complete the full
 * commit first; the test then verifies the first job's outcome and how many times
 * each store operation was invoked on the spy.
 *
 * @param stream                  name of the stream to create and operate on
 * @param func                    store method on which the first handler is stalled
 * @param expectFailureOnFirstJob whether the stalled first job is expected to fail
 * @param firstExceptionPredicate predicate matching the first job's expected failure type
 * @param invocationCount         expected invocation count per store method on the spy
 * @param expectedVersion         expected version number of the committing-transactions record afterwards
 */
private void concurrentRollingTxnCommit(String stream, String func, boolean expectFailureOnFirstJob, Predicate<Throwable> firstExceptionPredicate, Map<String, Integer> invocationCount, int expectedVersion) {
StreamMetadataStore streamStore1 = getStore();
// spied store used by the first handler so individual calls can be stalled and verified
StreamMetadataStore streamStore1Spied = spy(getStore());
StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 1)).build();
streamStore1.createStream(scope, stream, config, System.currentTimeMillis(), null, executor).join();
streamStore1.setState(scope, stream, State.ACTIVE, null, executor).join();
// independent store instance backing the competing second handler
StreamMetadataStore streamStore2 = getStore();
CommitRequestHandler requestHandler1 = new CommitRequestHandler(streamStore1Spied, streamMetadataTasks, streamTransactionMetadataTasks, bucketStore, executor);
CommitRequestHandler requestHandler2 = new CommitRequestHandler(streamStore2, streamMetadataTasks, streamTransactionMetadataTasks, bucketStore, executor);
ScaleOperationTask scaleRequesthandler = new ScaleOperationTask(streamMetadataTasks, streamStore2, executor);
// create txn on epoch 0 and set it to committing
UUID txnId = streamStore1.generateTransactionId(scope, stream, null, executor).join();
VersionedTransactionData txnEpoch0 = streamStore1.createTransaction(scope, stream, txnId, 1000L, 10000L, null, executor).join();
streamStore1.sealTransaction(scope, stream, txnId, true, Optional.of(txnEpoch0.getVersion()), "", Long.MIN_VALUE, null, executor).join();
// perform scale so the committing txn ends up on an older epoch, forcing a rolling txn
ScaleOpEvent event = new ScaleOpEvent(scope, stream, Lists.newArrayList(0L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)), false, System.currentTimeMillis(), System.currentTimeMillis());
scaleRequesthandler.execute(event).join();
// regular commit
// start commit transactions
// latch pair: `signal` completes when the spied method is entered, `wait` releases it
CompletableFuture<Void> wait = new CompletableFuture<>();
CompletableFuture<Void> signal = new CompletableFuture<>();
// test rolling transaction --> since transaction on epoch 0 is committing, it will get committed first.
CommitEvent commitOnEpoch0 = new CommitEvent(scope, stream, 0);
setMockCommitTxnLatch(streamStore1, streamStore1Spied, func, signal, wait);
// start rolling txn
// stall rolling transaction in different stages
CompletableFuture<Void> future1Rolling = CompletableFuture.completedFuture(null).thenComposeAsync(v -> requestHandler1.execute(commitOnEpoch0), executor);
// wait until the first handler is inside the stalled store method ...
signal.join();
// ... then let the second handler run the whole commit to completion ...
requestHandler2.execute(commitOnEpoch0).join();
// ... and finally release the first handler.
wait.complete(null);
if (expectFailureOnFirstJob) {
AssertExtensions.assertSuppliedFutureThrows("first commit should fail", () -> future1Rolling, firstExceptionPredicate);
// verify how often the first handler touched each store operation (counts
// reflect retries after losing the race to the second handler)
verify(streamStore1Spied, times(invocationCount.get("startCommitTransactions"))).startCommitTransactions(anyString(), anyString(), anyInt(), any(), any());
verify(streamStore1Spied, times(invocationCount.get("startRollingTxn"))).startRollingTxn(anyString(), anyString(), anyInt(), any(), any(), any());
verify(streamStore1Spied, times(invocationCount.get("rollingTxnCreateDuplicateEpochs"))).rollingTxnCreateDuplicateEpochs(anyString(), anyString(), any(), anyLong(), any(), any(), any());
verify(streamStore1Spied, times(invocationCount.get("completeRollingTxn"))).completeRollingTxn(anyString(), anyString(), any(), any(), any(), any());
verify(streamStore1Spied, times(invocationCount.get("completeCommitTransactions"))).completeCommitTransactions(anyString(), anyString(), any(), any(), any(), any());
verify(streamStore1Spied, times(invocationCount.get("updateVersionedState"))).updateVersionedState(anyString(), anyString(), any(), any(), any(), any());
} else {
future1Rolling.join();
}
// validate rolling txn done and first job has updated the CTR with new txn record
VersionedMetadata<CommittingTransactionsRecord> versioned = streamStore1.getVersionedCommittingTransactionsRecord(scope, stream, null, executor).join();
assertEquals(CommittingTransactionsRecord.EMPTY, versioned.getObject());
assertEquals(expectedVersion, getVersionNumber(versioned.getVersion()));
// rolling txn creates two duplicate epochs plus the final active one => epoch 3
assertEquals(3, streamStore1.getActiveEpoch(scope, stream, null, true, executor).join().getEpoch());
assertEquals(State.ACTIVE, streamStore1.getState(scope, stream, true, null, executor).join());
}
Use of io.pravega.shared.controller.event.ScaleOpEvent in the project pravega by pravega: class RequestHandlersTest, method testScaleIgnoreFairness.
/**
 * Verifies fairness handling for scale events: reprocessing a scale event for work it
 * already started ignores any waiting-processor record on the stream, while a brand
 * new scale request must yield to that waiting processor.
 */
@Test
public void testScaleIgnoreFairness() {
    // Assemble the request handler from its individual task handlers;
    // only the scale path is exercised in this test.
    AutoScaleTask autoScaleTask = new AutoScaleTask(streamMetadataTasks, streamStore, executor);
    ScaleOperationTask scaleTask = new ScaleOperationTask(streamMetadataTasks, streamStore, executor);
    UpdateStreamTask updateTask = new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executor);
    SealStreamTask sealTask = new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor);
    DeleteStreamTask deleteTask = new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executor);
    TruncateStreamTask truncateTask = new TruncateStreamTask(streamMetadataTasks, streamStore, executor);
    CreateReaderGroupTask createRgTask = new CreateReaderGroupTask(streamMetadataTasks, streamStore, executor);
    DeleteReaderGroupTask deleteRgTask = new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executor);
    UpdateReaderGroupTask updateRgTask = new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executor);
    DeleteScopeTask deleteScopeTask = new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtTasks, executor);
    StreamRequestHandler handler = new StreamRequestHandler(autoScaleTask, scaleTask, updateTask, sealTask, deleteTask, truncateTask, createRgTask, deleteRgTask, updateRgTask, streamStore, deleteScopeTask, executor);
    String name = "fairness";
    streamStore.createScope(name, null, executor).join();
    streamMetadataTasks.createStream(name, name, StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build(), System.currentTimeMillis(), 0L).join();
    // 1. make the segment store fail all seal calls.
    doAnswer(x -> Futures.failedFuture(new RuntimeException())).when(segmentHelper).sealSegment(anyString(), anyString(), anyLong(), anyString(), anyLong());
    // 2. the first scale attempt starts but fails with a retryable error while sealing.
    ScaleOpEvent firstScale = new ScaleOpEvent(name, name, Collections.singletonList(0L), Collections.singletonList(new AbstractMap.SimpleEntry<>(0.0, 1.0)), false, System.currentTimeMillis(), 0L);
    AssertExtensions.assertFutureThrows("", handler.process(firstScale, () -> false), e -> Exceptions.unwrap(e) instanceof RuntimeException);
    // The scale was started, so the stream is left in SCALING.
    assertEquals(State.SCALING, streamStore.getState(name, name, true, null, executor).join());
    // 3. register a different processor as waiting on this stream.
    streamStore.createWaitingRequestIfAbsent(name, name, "myProcessor", null, executor).join();
    // 4. let seal calls succeed again.
    doAnswer(x -> CompletableFuture.completedFuture(true)).when(segmentHelper).sealSegment(anyString(), anyString(), anyLong(), anyString(), anyLong());
    // 5. retrying the same event succeeds, ignoring the waiting processor.
    handler.process(firstScale, () -> false).join();
    EpochRecord epochAfterScale = streamStore.getActiveEpoch(name, name, null, true, executor).join();
    assertEquals(1, epochAfterScale.getEpoch());
    assertEquals(State.ACTIVE, streamStore.getState(name, name, true, null, executor).join());
    // 6. a brand new scale request, however, must yield to the waiting processor.
    ScaleOpEvent secondScale = new ScaleOpEvent(name, name, Collections.singletonList(NameUtils.computeSegmentId(1, 1)), Collections.singletonList(new AbstractMap.SimpleEntry<>(0.0, 1.0)), false, System.currentTimeMillis(), 0L);
    AssertExtensions.assertFutureThrows("", handler.process(secondScale, () -> false), e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    streamStore.deleteWaitingRequestConditionally(name, name, "myProcessor", null, executor).join();
}
Use of io.pravega.shared.controller.event.ScaleOpEvent in the project pravega by pravega: class ControllerEventProcessorTest, method testCommitEventForSealingStream.
/**
 * Verifies that sealing a stream is postponed while committing transactions are
 * outstanding: across a scale and a rolling-transaction commit, the seal task must
 * keep failing with OperationNotAllowed and only complete once every transaction
 * has been committed.
 */
@Test(timeout = 60000)
public void testCommitEventForSealingStream() {
    ScaleOperationTask scaleHandler = new ScaleOperationTask(streamMetadataTasks, streamStore, executor);
    SealStreamTask sealHandler = new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor);
    String streamName = "commitWithSeal";
    StreamConfiguration streamConfig = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build();
    streamStore.createStream(SCOPE, streamName, streamConfig, System.currentTimeMillis(), null, executor).join();
    streamStore.setState(SCOPE, streamName, State.ACTIVE, null, executor).join();
    // Open a transaction on epoch 0 and move it to COMMITTING.
    UUID epoch0TxnId = streamStore.generateTransactionId(SCOPE, streamName, null, executor).join();
    VersionedTransactionData epoch0TxnData = streamStore.createTransaction(SCOPE, streamName, epoch0TxnId, 10000, 10000, null, executor).join();
    Assert.assertNotNull(epoch0TxnData);
    checkTransactionState(SCOPE, streamName, epoch0TxnId, TxnStatus.OPEN);
    streamStore.sealTransaction(SCOPE, streamName, epoch0TxnData.getId(), true, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();
    checkTransactionState(SCOPE, streamName, epoch0TxnData.getId(), TxnStatus.COMMITTING);
    // Scale the stream so the committing transaction now sits on an older epoch.
    List<Map.Entry<Double, Double>> fullRange = new LinkedList<>();
    fullRange.add(new AbstractMap.SimpleEntry<>(0.0, 1.0));
    scaleHandler.execute(new ScaleOpEvent(SCOPE, streamName, Collections.singletonList(0L), fullRange, false, System.currentTimeMillis(), 0L)).join();
    // Open a second committing transaction on the post-scale epoch.
    UUID epoch1TxnId = streamStore.generateTransactionId(SCOPE, streamName, null, executor).join();
    VersionedTransactionData epoch1TxnData = streamStore.createTransaction(SCOPE, streamName, epoch1TxnId, 10000, 10000, null, executor).join();
    Assert.assertNotNull(epoch1TxnData);
    checkTransactionState(SCOPE, streamName, epoch1TxnId, TxnStatus.OPEN);
    streamStore.sealTransaction(SCOPE, streamName, epoch1TxnData.getId(), true, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();
    checkTransactionState(SCOPE, streamName, epoch1TxnData.getId(), TxnStatus.COMMITTING);
    // Move the stream to SEALING while both transactions are still committing.
    streamStore.setState(SCOPE, streamName, State.SEALING, null, executor).join();
    // Sealing must be postponed while committing transactions are outstanding.
    AssertExtensions.assertFutureThrows("Seal stream should fail with operation not allowed as their are outstanding transactions", sealHandler.execute(new SealStreamEvent(SCOPE, streamName, 0L)), e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    // The epoch in the commit event is ignored: transactions on the lowest epoch
    // are committed first, via a rolling transaction.
    CommitRequestHandler commitHandler = new CommitRequestHandler(streamStore, streamMetadataTasks, streamTransactionMetadataTasks, bucketStore, executor);
    commitHandler.processEvent(new CommitEvent(SCOPE, streamName, epoch1TxnData.getEpoch())).join();
    checkTransactionState(SCOPE, streamName, epoch0TxnData.getId(), TxnStatus.COMMITTED);
    checkTransactionState(SCOPE, streamName, epoch1TxnData.getId(), TxnStatus.COMMITTING);
    EpochRecord currentEpoch = streamStore.getActiveEpoch(SCOPE, streamName, null, true, executor).join();
    assertEquals(3, currentEpoch.getEpoch());
    assertEquals(1, currentEpoch.getReferenceEpoch());
    // One committing transaction remains, so sealing is still postponed.
    AssertExtensions.assertFutureThrows("Seal stream should fail with operation not allowed as their are outstanding transactions", sealHandler.execute(new SealStreamEvent(SCOPE, streamName, 0L)), e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    // Commit the remaining transaction on epoch 1.
    commitHandler.processEvent(new CommitEvent(SCOPE, streamName, epoch1TxnData.getEpoch())).join();
    checkTransactionState(SCOPE, streamName, epoch1TxnData.getId(), TxnStatus.COMMITTED);
    // Verify the rolling transaction left the stream on the duplicate active epoch.
    currentEpoch = streamStore.getActiveEpoch(SCOPE, streamName, null, true, executor).join();
    assertEquals(3, currentEpoch.getEpoch());
    assertEquals(1, currentEpoch.getReferenceEpoch());
    // With no outstanding transactions, the seal finally completes.
    sealHandler.execute(new SealStreamEvent(SCOPE, streamName, 0L)).join();
}
Use of io.pravega.shared.controller.event.ScaleOpEvent in the project pravega by pravega: class ScaleRequestHandlerTest, method testInconsistentScaleRequestAfterRollingTxn.
/**
 * Verifies that a scale request submitted before a rolling-transaction commit becomes
 * inconsistent afterwards: the rolling txn bumps the epochs underneath the pending
 * epoch transition, so running the scale must fail while the stream stays ACTIVE.
 */
@Test(timeout = 30000)
public void testInconsistentScaleRequestAfterRollingTxn() throws Exception {
    String streamName = "newStream";
    StreamConfiguration streamConfig = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 2)).build();
    streamMetadataTasks.createStream(scope, streamName, streamConfig, System.currentTimeMillis(), 0L).get();
    EventWriterMock eventWriter = new EventWriterMock();
    streamMetadataTasks.setRequestEventWriter(eventWriter);
    ScaleOperationTask scaleTask = new ScaleOperationTask(streamMetadataTasks, streamStore, executor);
    StreamRequestHandler handler = new StreamRequestHandler(null, scaleTask, null, null, null, null, null, null, null, streamStore, null, executor);
    CommitRequestHandler commitHandler = new CommitRequestHandler(streamStore, streamMetadataTasks, streamTransactionMetadataTasks, bucketStore, executor);
    // 1. two transactions created on epoch 0, both moved to committing.
    UUID firstTxnId = streamStore.generateTransactionId(scope, streamName, null, executor).join();
    VersionedTransactionData firstTxn = streamStore.createTransaction(scope, streamName, firstTxnId, 10000, 10000, null, executor).join();
    streamStore.sealTransaction(scope, streamName, firstTxn.getId(), true, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();
    UUID secondTxnId = streamStore.generateTransactionId(scope, streamName, null, executor).join();
    VersionedTransactionData secondTxn = streamStore.createTransaction(scope, streamName, secondTxnId, 10000, 10000, null, executor).join();
    streamStore.sealTransaction(scope, streamName, secondTxn.getId(), true, Optional.empty(), "", Long.MIN_VALUE, null, executor).join();
    EpochRecord epochZero = streamStore.getActiveEpoch(scope, streamName, null, true, executor).join();
    assertEquals(0, epochZero.getEpoch());
    // 2. run a scale splitting segment 0 into [0, 0.25) and [0.25, 0.5).
    handler.process(new ScaleOpEvent(scope, streamName, Lists.newArrayList(0L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 0.25), new AbstractMap.SimpleEntry<>(0.25, 0.5)), false, System.currentTimeMillis(), System.currentTimeMillis()), () -> false).join();
    // 3. the scale completed, so the stream is back to ACTIVE.
    State state = streamStore.getState(scope, streamName, true, null, executor).join();
    assertEquals(State.ACTIVE, state);
    // 4. submit (but do not run) another scale: this records an epoch transition
    // while the stream stays ACTIVE.
    streamStore.submitScale(scope, streamName, Lists.newArrayList(1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.5, 0.75), new AbstractMap.SimpleEntry<>(0.75, 1.0)), System.currentTimeMillis(), null, null, executor).join();
    // 5. committing the epoch-0 transaction triggers a rolling txn, bumping epochs.
    assertTrue(Futures.await(commitHandler.processEvent(new CommitEvent(scope, streamName, firstTxn.getEpoch()))));
    TxnStatus txnStatus = streamStore.transactionStatus(scope, streamName, firstTxnId, null, executor).join();
    assertEquals(TxnStatus.COMMITTED, txnStatus);
    // 6. the pending epoch transition is no longer consistent with the epochs
    // created by the rolling txn, so running the scale must fail.
    AssertExtensions.assertFutureThrows("epoch transition should be inconsistent", handler.process(new ScaleOpEvent(scope, streamName, Lists.newArrayList(1L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.5, 0.75), new AbstractMap.SimpleEntry<>(0.75, 1.0)), false, System.currentTimeMillis(), System.currentTimeMillis()), () -> false), e -> Exceptions.unwrap(e) instanceof IllegalStateException);
    state = streamStore.getState(scope, streamName, true, null, executor).join();
    assertEquals(State.ACTIVE, state);
}
Aggregations