Example usage of io.pravega.controller.server.eventProcessor.requesthandlers.ScaleOperationTask in the pravega project.
From the class RequestHandlersTest, method testScaleIgnoreFairness.
@Test
public void testScaleIgnoreFairness() {
    // Wire a request handler with the full set of stream tasks so a ScaleOpEvent
    // can be processed end to end against the in-memory store and mocked segment helper.
    StreamRequestHandler streamRequestHandler = new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, executor),
            new ScaleOperationTask(streamMetadataTasks, streamStore, executor),
            new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor),
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new TruncateStreamTask(streamMetadataTasks, streamStore, executor),
            new CreateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            streamStore,
            new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtTasks, executor),
            executor);
    String fairness = "fairness";
    streamStore.createScope(fairness, null, executor).join();
    streamMetadataTasks.createStream(fairness, fairness,
            StreamConfiguration.builder().scalingPolicy(ScalingPolicy.fixed(1)).build(),
            System.currentTimeMillis(), 0L).join();
    // 1. set segment helper mock to throw exception
    doAnswer(x -> Futures.failedFuture(new RuntimeException())).when(segmentHelper)
            .sealSegment(anyString(), anyString(), anyLong(), anyString(), anyLong());
    // 2. start scale --> this should fail with a retryable exception while talking to segment store!
    ScaleOpEvent scaleEvent = new ScaleOpEvent(fairness, fairness, Collections.singletonList(0L),
            Collections.singletonList(new AbstractMap.SimpleEntry<>(0.0, 1.0)), false,
            System.currentTimeMillis(), 0L);
    AssertExtensions.assertFutureThrows("", streamRequestHandler.process(scaleEvent, () -> false),
            e -> Exceptions.unwrap(e) instanceof RuntimeException);
    // Verify the failing segment-store call was actually attempted; the sibling tests
    // (truncate/seal/update IgnoreFairness) all perform this check after step 2.
    verify(segmentHelper, atLeastOnce()).sealSegment(anyString(), anyString(), anyLong(), anyString(), anyLong());
    // verify that scale was started
    assertEquals(State.SCALING, streamStore.getState(fairness, fairness, true, null, executor).join());
    // 3. set waiting processor to "random name"
    streamStore.createWaitingRequestIfAbsent(fairness, fairness, "myProcessor", null, executor).join();
    // 4. reset segment helper to return success
    doAnswer(x -> CompletableFuture.completedFuture(true)).when(segmentHelper)
            .sealSegment(anyString(), anyString(), anyLong(), anyString(), anyLong());
    // 5. process again. it should succeed while ignoring waiting processor
    streamRequestHandler.process(scaleEvent, () -> false).join();
    EpochRecord activeEpoch = streamStore.getActiveEpoch(fairness, fairness, null, true, executor).join();
    assertEquals(1, activeEpoch.getEpoch());
    assertEquals(State.ACTIVE, streamStore.getState(fairness, fairness, true, null, executor).join());
    // 6. run a new scale. it should fail because of waiting processor.
    ScaleOpEvent scaleEvent2 = new ScaleOpEvent(fairness, fairness,
            Collections.singletonList(NameUtils.computeSegmentId(1, 1)),
            Collections.singletonList(new AbstractMap.SimpleEntry<>(0.0, 1.0)), false,
            System.currentTimeMillis(), 0L);
    AssertExtensions.assertFutureThrows("", streamRequestHandler.process(scaleEvent2, () -> false),
            e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    streamStore.deleteWaitingRequestConditionally(fairness, fairness, "myProcessor", null, executor).join();
}
Example usage of io.pravega.controller.server.eventProcessor.requesthandlers.ScaleOperationTask in the pravega project.
From the class RequestHandlersTest, method testTruncateIgnoreFairness.
@Test
public void testTruncateIgnoreFairness() {
    // Handler wired with all stream tasks so TruncateStreamEvents are routed to TruncateStreamTask.
    StreamRequestHandler requestHandler = new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, executor),
            new ScaleOperationTask(streamMetadataTasks, streamStore, executor),
            new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor),
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new TruncateStreamTask(streamMetadataTasks, streamStore, executor),
            new CreateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            streamStore,
            new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtTasks, executor),
            executor);
    // Scope and stream share the same name; a single fixed segment keeps the truncation simple.
    String name = "fairness";
    streamStore.createScope(name, null, executor).join();
    StreamConfiguration singleSegment = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.fixed(1)).build();
    streamMetadataTasks.createStream(name, name, singleSegment, System.currentTimeMillis(), 0L).join();
    // Step 1: make the segment store reject truncation attempts.
    doAnswer(x -> Futures.failedFuture(new RuntimeException())).when(segmentHelper)
            .truncateSegment(anyString(), anyString(), anyLong(), anyLong(), anyString(), anyLong());
    // Step 2: begin truncation; processing the event must surface the retryable failure.
    streamStore.startTruncation(name, name, Collections.singletonMap(0L, 0L), null, executor).join();
    streamStore.setState(name, name, State.TRUNCATING, null, executor).join();
    assertEquals(State.TRUNCATING, streamStore.getState(name, name, true, null, executor).join());
    TruncateStreamEvent truncateEvent = new TruncateStreamEvent(name, name, 0L);
    AssertExtensions.assertFutureThrows("", requestHandler.process(truncateEvent, () -> false),
            e -> Exceptions.unwrap(e) instanceof RuntimeException);
    verify(segmentHelper, atLeastOnce())
            .truncateSegment(anyString(), anyString(), anyLong(), anyLong(), anyString(), anyLong());
    // Step 3: park a different processor's name as the waiting request.
    streamStore.createWaitingRequestIfAbsent(name, name, "myProcessor", null, executor).join();
    // Step 4: let the segment store succeed from now on.
    doAnswer(x -> CompletableFuture.completedFuture(null)).when(segmentHelper)
            .truncateSegment(anyString(), anyString(), anyLong(), anyLong(), anyString(), anyLong());
    // Step 5: reprocessing the same in-flight event succeeds despite the waiting processor.
    requestHandler.process(truncateEvent, () -> false).join();
    assertEquals(State.ACTIVE, streamStore.getState(name, name, true, null, executor).join());
    // Step 6: a fresh truncation must now be rejected because another processor is waiting.
    TruncateStreamEvent secondEvent = new TruncateStreamEvent(name, name, 0L);
    AssertExtensions.assertFutureThrows("", requestHandler.process(secondEvent, () -> false),
            e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    streamStore.deleteWaitingRequestConditionally(name, name, "myProcessor", null, executor).join();
}
Example usage of io.pravega.controller.server.eventProcessor.requesthandlers.ScaleOperationTask in the pravega project.
From the class RequestHandlersTest, method testSealIgnoreFairness.
@Test
public void testSealIgnoreFairness() {
    // Handler wired with all stream tasks so SealStreamEvents are routed to SealStreamTask.
    StreamRequestHandler requestHandler = new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, executor),
            new ScaleOperationTask(streamMetadataTasks, streamStore, executor),
            new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor),
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new TruncateStreamTask(streamMetadataTasks, streamStore, executor),
            new CreateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            streamStore,
            new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtTasks, executor),
            executor);
    // Scope and stream share the same name; one fixed segment is enough to exercise sealing.
    String name = "fairness";
    streamStore.createScope(name, null, executor).join();
    StreamConfiguration singleSegment = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.fixed(1)).build();
    streamMetadataTasks.createStream(name, name, singleSegment, System.currentTimeMillis(), 0L).join();
    // Step 1: make the segment store reject seal attempts.
    doAnswer(x -> Futures.failedFuture(new RuntimeException())).when(segmentHelper)
            .sealSegment(anyString(), anyString(), anyLong(), anyString(), anyLong());
    // Step 2: move the stream into SEALING; processing the event must surface the retryable failure.
    streamStore.setState(name, name, State.SEALING, null, executor).join();
    assertEquals(State.SEALING, streamStore.getState(name, name, true, null, executor).join());
    SealStreamEvent sealEvent = new SealStreamEvent(name, name, 0L);
    AssertExtensions.assertFutureThrows("", requestHandler.process(sealEvent, () -> false),
            e -> Exceptions.unwrap(e) instanceof RuntimeException);
    verify(segmentHelper, atLeastOnce())
            .sealSegment(anyString(), anyString(), anyLong(), anyString(), anyLong());
    // Step 3: park a different processor's name as the waiting request.
    streamStore.createWaitingRequestIfAbsent(name, name, "myProcessor", null, executor).join();
    // Step 4: let the segment store succeed from now on.
    doAnswer(x -> CompletableFuture.completedFuture(null)).when(segmentHelper)
            .sealSegment(anyString(), anyString(), anyLong(), anyString(), anyLong());
    // Step 5: reprocessing the same in-flight event succeeds despite the waiting processor.
    requestHandler.process(sealEvent, () -> false).join();
    assertEquals(State.SEALED, streamStore.getState(name, name, true, null, executor).join());
    // Step 6: a fresh seal request must now be rejected because another processor is waiting.
    SealStreamEvent secondEvent = new SealStreamEvent(name, name, 0L);
    AssertExtensions.assertFutureThrows("", requestHandler.process(secondEvent, () -> false),
            e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    streamStore.deleteWaitingRequestConditionally(name, name, "myProcessor", null, executor).join();
}
Example usage of io.pravega.controller.server.eventProcessor.requesthandlers.ScaleOperationTask in the pravega project.
From the class RequestHandlersTest, method testUpdateIgnoreFairness.
@Test
public void testUpdateIgnoreFairness() {
    // Handler wired with all stream tasks so UpdateStreamEvents are routed to UpdateStreamTask.
    StreamRequestHandler requestHandler = new StreamRequestHandler(
            new AutoScaleTask(streamMetadataTasks, streamStore, executor),
            new ScaleOperationTask(streamMetadataTasks, streamStore, executor),
            new UpdateStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new SealStreamTask(streamMetadataTasks, streamTransactionMetadataTasks, streamStore, executor),
            new DeleteStreamTask(streamMetadataTasks, streamStore, bucketStore, executor),
            new TruncateStreamTask(streamMetadataTasks, streamStore, executor),
            new CreateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new DeleteReaderGroupTask(streamMetadataTasks, streamStore, executor),
            new UpdateReaderGroupTask(streamMetadataTasks, streamStore, executor),
            streamStore,
            new DeleteScopeTask(streamMetadataTasks, streamStore, kvtStore, kvtTasks, executor),
            executor);
    // Scope and stream share the same name; a single fixed segment keeps the update simple.
    String name = "fairness";
    streamStore.createScope(name, null, executor).join();
    StreamConfiguration singleSegment = StreamConfiguration.builder()
            .scalingPolicy(ScalingPolicy.fixed(1)).build();
    streamMetadataTasks.createStream(name, name, singleSegment, System.currentTimeMillis(), 0L).join();
    // Step 1: make the segment store reject policy updates.
    doAnswer(x -> Futures.failedFuture(new RuntimeException())).when(segmentHelper)
            .updatePolicy(anyString(), anyString(), any(), anyLong(), anyString(), anyLong());
    // Step 2: begin a configuration update; processing the event must surface the retryable failure.
    streamStore.startUpdateConfiguration(name, name, singleSegment, null, executor).join();
    streamStore.setState(name, name, State.UPDATING, null, executor).join();
    assertEquals(State.UPDATING, streamStore.getState(name, name, true, null, executor).join());
    UpdateStreamEvent updateEvent = new UpdateStreamEvent(name, name, 0L);
    AssertExtensions.assertFutureThrows("", requestHandler.process(updateEvent, () -> false),
            e -> Exceptions.unwrap(e) instanceof RuntimeException);
    verify(segmentHelper, atLeastOnce())
            .updatePolicy(anyString(), anyString(), any(), anyLong(), anyString(), anyLong());
    // Step 3: park a different processor's name as the waiting request.
    streamStore.createWaitingRequestIfAbsent(name, name, "myProcessor", null, executor).join();
    // Step 4: let the segment store succeed from now on.
    doAnswer(x -> CompletableFuture.completedFuture(null)).when(segmentHelper)
            .updatePolicy(anyString(), anyString(), any(), anyLong(), anyString(), anyLong());
    // Step 5: reprocessing the same in-flight event succeeds despite the waiting processor.
    requestHandler.process(updateEvent, () -> false).join();
    assertEquals(State.ACTIVE, streamStore.getState(name, name, true, null, executor).join());
    // Step 6: a fresh update must be rejected — another processor is waiting and the
    // stream state no longer allows ignoring it.
    UpdateStreamEvent secondEvent = new UpdateStreamEvent(name, name, 0L);
    AssertExtensions.assertFutureThrows("", requestHandler.process(secondEvent, () -> false),
            e -> Exceptions.unwrap(e) instanceof StoreException.OperationNotAllowedException);
    streamStore.deleteWaitingRequestConditionally(name, name, "myProcessor", null, executor).join();
}
Example usage of io.pravega.controller.server.eventProcessor.requesthandlers.ScaleOperationTask in the pravega project.
From the class RequestHandlersTest, method concurrentRollingTxnCommit (a private helper shared by several tests).
/**
 * Drives two CommitRequestHandlers concurrently through a rolling-transaction commit on the
 * same stream and checks how the first (stalled, spied) handler behaves once the second
 * handler has completed the commit ahead of it.
 *
 * @param stream                  stream name to create inside the shared test scope
 * @param func                    name of the spied store method at which the first handler is stalled
 *                                (consumed by {@code setMockCommitTxnLatch})
 * @param expectFailureOnFirstJob whether the first handler is expected to fail after losing the race
 * @param firstExceptionPredicate predicate the first handler's failure must satisfy
 * @param invocationCount         expected number of invocations per spied store method
 * @param expectedVersion         expected version of the committing-transactions record at the end
 */
private void concurrentRollingTxnCommit(String stream, String func, boolean expectFailureOnFirstJob, Predicate<Throwable> firstExceptionPredicate, Map<String, Integer> invocationCount, int expectedVersion) {
// Two independent store handles: handler1 uses a spied copy so individual store calls can be
// stalled and counted; handler2 uses an unspied handle and runs unimpeded.
StreamMetadataStore streamStore1 = getStore();
StreamMetadataStore streamStore1Spied = spy(getStore());
StreamConfiguration config = StreamConfiguration.builder().scalingPolicy(ScalingPolicy.byEventRate(1, 2, 1)).build();
streamStore1.createStream(scope, stream, config, System.currentTimeMillis(), null, executor).join();
streamStore1.setState(scope, stream, State.ACTIVE, null, executor).join();
StreamMetadataStore streamStore2 = getStore();
CommitRequestHandler requestHandler1 = new CommitRequestHandler(streamStore1Spied, streamMetadataTasks, streamTransactionMetadataTasks, bucketStore, executor);
CommitRequestHandler requestHandler2 = new CommitRequestHandler(streamStore2, streamMetadataTasks, streamTransactionMetadataTasks, bucketStore, executor);
ScaleOperationTask scaleRequesthandler = new ScaleOperationTask(streamMetadataTasks, streamStore2, executor);
// create txn on epoch 0 and set it to committing
UUID txnId = streamStore1.generateTransactionId(scope, stream, null, executor).join();
VersionedTransactionData txnEpoch0 = streamStore1.createTransaction(scope, stream, txnId, 1000L, 10000L, null, executor).join();
streamStore1.sealTransaction(scope, stream, txnId, true, Optional.of(txnEpoch0.getVersion()), "", Long.MIN_VALUE, null, executor).join();
// perform scale
// Scaling past epoch 0 forces the epoch-0 txn commit to take the rolling-transaction path.
ScaleOpEvent event = new ScaleOpEvent(scope, stream, Lists.newArrayList(0L), Lists.newArrayList(new AbstractMap.SimpleEntry<>(0.0, 1.0)), false, System.currentTimeMillis(), System.currentTimeMillis());
scaleRequesthandler.execute(event).join();
// regular commit
// start commit transactions
// `signal` completes when handler1 reaches the stalled store method; `wait` releases it.
CompletableFuture<Void> wait = new CompletableFuture<>();
CompletableFuture<Void> signal = new CompletableFuture<>();
// test rolling transaction --> since transaction on epoch 0 is committing, it will get committed first.
CommitEvent commitOnEpoch0 = new CommitEvent(scope, stream, 0);
setMockCommitTxnLatch(streamStore1, streamStore1Spied, func, signal, wait);
// start rolling txn
// stall rolling transaction in different stages
CompletableFuture<Void> future1Rolling = CompletableFuture.completedFuture(null).thenComposeAsync(v -> requestHandler1.execute(commitOnEpoch0), executor);
// Wait until handler1 is stalled at `func`, let handler2 finish the whole commit, then
// release handler1 so it resumes against state that handler2 has already advanced.
signal.join();
requestHandler2.execute(commitOnEpoch0).join();
wait.complete(null);
if (expectFailureOnFirstJob) {
// Handler1 lost the race: assert the expected failure and the exact number of times it
// touched each store method before giving up.
AssertExtensions.assertSuppliedFutureThrows("first commit should fail", () -> future1Rolling, firstExceptionPredicate);
verify(streamStore1Spied, times(invocationCount.get("startCommitTransactions"))).startCommitTransactions(anyString(), anyString(), anyInt(), any(), any());
verify(streamStore1Spied, times(invocationCount.get("startRollingTxn"))).startRollingTxn(anyString(), anyString(), anyInt(), any(), any(), any());
verify(streamStore1Spied, times(invocationCount.get("rollingTxnCreateDuplicateEpochs"))).rollingTxnCreateDuplicateEpochs(anyString(), anyString(), any(), anyLong(), any(), any(), any());
verify(streamStore1Spied, times(invocationCount.get("completeRollingTxn"))).completeRollingTxn(anyString(), anyString(), any(), any(), any(), any());
verify(streamStore1Spied, times(invocationCount.get("completeCommitTransactions"))).completeCommitTransactions(anyString(), anyString(), any(), any(), any(), any());
verify(streamStore1Spied, times(invocationCount.get("updateVersionedState"))).updateVersionedState(anyString(), anyString(), any(), any(), any(), any());
} else {
future1Rolling.join();
}
// validate rolling txn done and first job has updated the CTR with new txn record
// Rolling txn adds duplicate epochs, so the active epoch ends at 3; the committing-transactions
// record is back to EMPTY at the version the caller expects, and the stream is ACTIVE again.
VersionedMetadata<CommittingTransactionsRecord> versioned = streamStore1.getVersionedCommittingTransactionsRecord(scope, stream, null, executor).join();
assertEquals(CommittingTransactionsRecord.EMPTY, versioned.getObject());
assertEquals(expectedVersion, getVersionNumber(versioned.getVersion()));
assertEquals(3, streamStore1.getActiveEpoch(scope, stream, null, true, executor).join().getEpoch());
assertEquals(State.ACTIVE, streamStore1.getState(scope, stream, true, null, executor).join());
}
Aggregations